From e119c2df86602e3d21d419b164e26e605732135e Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 25 Sep 2024 11:28:30 -0700 Subject: [PATCH 001/138] First version of client with endpoint operations --- sdk/ai/azure-ai-client/CHANGELOG.md | 5 + sdk/ai/azure-ai-client/LICENSE | 21 + sdk/ai/azure-ai-client/MANIFEST.in | 7 + sdk/ai/azure-ai-client/README.md | 80 + sdk/ai/azure-ai-client/azure/__init__.py | 1 + sdk/ai/azure-ai-client/azure/ai/__init__.py | 1 + .../azure/ai/client/__init__.py | 26 + .../azure/ai/client/_client.py | 129 + .../azure/ai/client/_configuration.py | 82 + .../azure/ai/client/_model_base.py | 1158 +++++++++ .../azure-ai-client/azure/ai/client/_patch.py | 90 + .../azure/ai/client/_serialization.py | 2115 +++++++++++++++++ .../azure/ai/client/_version.py | 9 + .../azure/ai/client/aio/__init__.py | 23 + .../azure/ai/client/aio/_client.py | 131 + .../azure/ai/client/aio/_configuration.py | 82 + .../azure/ai/client/aio/_patch.py | 20 + .../ai/client/aio/operations/__init__.py | 23 + .../ai/client/aio/operations/_operations.py | 348 +++ .../azure/ai/client/aio/operations/_patch.py | 20 + .../azure/ai/client/models/__init__.py | 37 + .../azure/ai/client/models/_enums.py | 30 + .../azure/ai/client/models/_models.py | 305 +++ .../azure/ai/client/models/_patch.py | 20 + .../azure/ai/client/operations/__init__.py | 23 + .../azure/ai/client/operations/_operations.py | 397 ++++ .../azure/ai/client/operations/_patch.py | 94 + .../azure-ai-client/azure/ai/client/py.typed | 1 + sdk/ai/azure-ai-client/dev_requirements.txt | 4 + .../generated_tests/conftest.py | 35 + .../test_endpoints_operations.py | 45 + .../test_endpoints_operations_async.py | 46 + .../generated_tests/testpreparer.py | 24 + .../generated_tests/testpreparer_async.py | 20 + .../samples/endpoints/sample_endpoints.py | 134 ++ sdk/ai/azure-ai-client/setup.py | 71 + .../tests/endpoints/unit_tests.py | 114 + sdk/ai/azure-ai-client/tsp-location.yaml | 4 + 38 files changed, 5775 insertions(+) create mode 100644 sdk/ai/azure-ai-client/CHANGELOG.md create mode 100644 sdk/ai/azure-ai-client/LICENSE create mode 100644 sdk/ai/azure-ai-client/MANIFEST.in create mode 100644 sdk/ai/azure-ai-client/README.md create mode 100644 sdk/ai/azure-ai-client/azure/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_client.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_configuration.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_model_base.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_patch.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_serialization.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_version.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py create mode 100644 
sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/_models.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/py.typed create mode 100644 sdk/ai/azure-ai-client/dev_requirements.txt create mode 100644 sdk/ai/azure-ai-client/generated_tests/conftest.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/testpreparer.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py create mode 100644 sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py create mode 100644 sdk/ai/azure-ai-client/setup.py create mode 100644 sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py create mode 100644 sdk/ai/azure-ai-client/tsp-location.yaml diff --git a/sdk/ai/azure-ai-client/CHANGELOG.md b/sdk/ai/azure-ai-client/CHANGELOG.md new file mode 100644 index 000000000000..628743d283a9 --- /dev/null +++ b/sdk/ai/azure-ai-client/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +- Initial version diff --git a/sdk/ai/azure-ai-client/LICENSE b/sdk/ai/azure-ai-client/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/ai/azure-ai-client/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/MANIFEST.in b/sdk/ai/azure-ai-client/MANIFEST.in
new file mode 100644
index 000000000000..cfc4bcbd9797
--- /dev/null
+++ b/sdk/ai/azure-ai-client/MANIFEST.in
@@ -0,0 +1,7 @@
+include *.md
+include LICENSE
+include azure/ai/client/py.typed
+recursive-include tests *.py
+recursive-include samples *.py *.md
+include azure/__init__.py
+include azure/ai/__init__.py
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/README.md b/sdk/ai/azure-ai-client/README.md
new file mode 100644
index 000000000000..d6bc4ea4c008
--- /dev/null
+++ b/sdk/ai/azure-ai-client/README.md
@@ -0,0 +1,80 @@
+
+
+# Azure AI Client client library for Python
+
+
+## Getting started
+
+### Install the package
+
+```bash
+python -m pip install azure-ai-client
+```
+
+#### Prerequisites
+
+- Python 3.8 or later is required to use this package.
+- You need an [Azure subscription][azure_sub] to use this package.
+- An existing Azure AI Client instance.
+
+#### Create with an Azure Active Directory Credential
+
+To use an [Azure Active Directory (AAD) token credential][authenticate_with_token],
+provide an instance of the desired credential type obtained from the
+[azure-identity][azure_identity_credentials] library.
+
+To authenticate with AAD, you must first install the [`azure-identity`][azure_identity_pip] package with [pip][pip].
+
+After setup, you can choose which type of [credential][azure_identity_credentials] from `azure.identity` to use.
+As an example, [DefaultAzureCredential][default_azure_credential] can be used to authenticate the client.
+
+Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables:
+`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`.
+
+Use the returned token credential to authenticate the client (the client is constructed with the
+subscription, resource group, and workspace parameters defined in `_client.py`):
+
+```python
+>>> from azure.ai.client import Client
+>>> from azure.identity import DefaultAzureCredential
+>>> client = Client(
+...     subscription_id='<subscription ID>',
+...     resource_group_name='<resource group name>',
+...     workspace_name='<workspace name>',
+...     credential=DefaultAzureCredential(),
+... )
+```
+
+## Examples
+
+```python
+>>> from azure.ai.client import Client
+>>> from azure.identity import DefaultAzureCredential
+>>> from azure.core.exceptions import HttpResponseError
+
+>>> client = Client(
+...     subscription_id='<subscription ID>',
+...     resource_group_name='<resource group name>',
+...     workspace_name='<workspace name>',
+...     credential=DefaultAzureCredential(),
+... )
+>>> try:
+...     pass  # call client operations here, e.g. through client.endpoints
+... except HttpResponseError as e:
+...     print('service responded with an error: {}'.format(e.response.json()))
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repos using our CLA.
+
+This project has adopted the
+[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
+see the Code of Conduct FAQ or contact opencode@microsoft.com with any
+additional questions or comments.
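+
+In addition to the examples above, the client can be used as a context manager so the underlying
+HTTP pipeline is closed deterministically (`__enter__`/`close` are defined in `_client.py`). A
+minimal, illustrative sketch; only the `endpoints`, `assistants`, and `evaluations` attributes
+shown in this patch are assumed:
+
+```python
+>>> from azure.ai.client import Client
+>>> from azure.identity import DefaultAzureCredential
+>>> with Client(
+...     subscription_id='<subscription ID>',
+...     resource_group_name='<resource group name>',
+...     workspace_name='<workspace name>',
+...     credential=DefaultAzureCredential(),
+... ) as client:
+...     endpoints_ops = client.endpoints  # also: client.assistants, client.evaluations
+```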
+ + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/ai/azure-ai-client/azure/__init__.py b/sdk/ai/azure-ai-client/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/__init__.py b/sdk/ai/azure-ai-client/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py new file mode 100644 index 000000000000..7b2a5f45e5ef --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._client import Client +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Client", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py new file mode 100644 index 000000000000..9e3bfdf8d053 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import ClientConfiguration +from ._serialization import Deserializer, Serializer +from .operations import AssistantsOperations, EndpointsOperations, EvaluationsOperations + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class Client: # pylint: disable=client-accepts-api-version-keyword + """Client. + + :ivar endpoints: EndpointsOperations operations + :vartype endpoints: azure.ai.client.operations.EndpointsOperations + :ivar assistants: AssistantsOperations operations + :vartype assistants: azure.ai.client.operations.AssistantsOperations + :ivar evaluations: EvaluationsOperations operations + :vartype evaluations: azure.ai.client.operations.EvaluationsOperations + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param resource_group_name: The name of the Resource Group. Required. + :type resource_group_name: str + :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :type workspace_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + self._config = ClientConfiguration( + subscription_id=subscription_id, + resource_group_name=resource_group_name, + workspace_name=workspace_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) + self.assistants = AssistantsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request(self, request: HttpRequest, *, stream: bool = 
False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py new file mode 100644 index 000000000000..de9b78e5df36 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Client. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param resource_group_name: The name of the Resource Group. Required. + :type resource_group_name: str + :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :type workspace_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__( + self, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if workspace_name is None: + raise ValueError("Parameter 'workspace_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.workspace_name = workspace_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-client/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py b/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py new file mode 100644 index 000000000000..12ad7f29c71e --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py @@ -0,0 +1,1158 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except, too-many-lines
+
+import copy
+import calendar
+import decimal
+import functools
+import sys
+import logging
+import base64
+import re
+import typing
+import enum
+import email.utils
+from datetime import datetime, date, time, timedelta, timezone
+from json import JSONEncoder
+import xml.etree.ElementTree as ET
+from typing_extensions import Self
+import isodate
+from azure.core.exceptions import DeserializationError
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.pipeline import PipelineResponse
+from azure.core.serialization import _Null
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping
+
+_LOGGER = logging.getLogger(__name__)
+
+__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
+
+TZ_UTC = timezone.utc
+_T = typing.TypeVar("_T")
+
+
+def _timedelta_as_isostr(td: timedelta) -> str:
+    """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
+
+    Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
+
+    :param timedelta td: The timedelta to convert
+    :rtype: str
+    :return: ISO8601 version of this timedelta
+    """
+
+    # Split seconds to larger units
+    seconds = td.total_seconds()
+    minutes, seconds = divmod(seconds, 60)
+    hours, minutes = divmod(minutes, 60)
+    days, hours = divmod(hours, 24)
+
+    days, hours, minutes = list(map(int, (days, hours, minutes)))
+    seconds = round(seconds, 6)
+
+    # Build date
+    date_str = ""
+    if days:
+        date_str = "%sD" % days
+
+    if hours or minutes or seconds:
+        # Build time
+        time_str = "T"
+
+        # Hours
+        bigger_exists = date_str or hours
+        if bigger_exists:
+            time_str += "{:02}H".format(hours)
+
+        # Minutes
+        bigger_exists = bigger_exists or minutes
+        if bigger_exists:
+            time_str += "{:02}M".format(minutes)
+
+        # Seconds
+        try:
+            if seconds.is_integer():
+                seconds_string = "{:02}".format(int(seconds))
+            else:
+                # 9 chars long w/ leading 0, 6 digits after decimal
+                seconds_string = "%09.6f" % seconds
+                # Remove trailing zeros
+                seconds_string = seconds_string.rstrip("0")
+        except AttributeError:  # int.is_integer() raises
+            seconds_string = "{:02}".format(seconds)
+
+        time_str += "{}S".format(seconds_string)
+    else:
+        time_str = ""
+
+    return "P" + date_str + time_str
+
+
+def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
+    encoded = base64.b64encode(o).decode()
+    if format == "base64url":
+        return encoded.strip("=").replace("+", "-").replace("/", "_")
+    return encoded
+
+
+def _serialize_datetime(o, format: typing.Optional[str] = None):
+    if hasattr(o, "year") and hasattr(o, "hour"):
+        if format == "rfc7231":
+            return email.utils.format_datetime(o, usegmt=True)
+        if format == "unix-timestamp":
+            return int(calendar.timegm(o.utctimetuple()))
+
+        # astimezone() fails for naive times in Python 2.7, so make sure o is aware (tzinfo is set)
+        if not o.tzinfo:
+            iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
+        else:
+            iso_formatted = o.astimezone(TZ_UTC).isoformat()
+        # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
+        return iso_formatted.replace("+00:00", "Z")
+    # Next try datetime.date or datetime.time
+    return o.isoformat()
+
+
+def _is_readonly(p):
+    try:
+        return p._visibility == ["read"]
+    except AttributeError:
+        return False
+
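+# NOTE: illustrative usage sketch, not generated code. The private helpers
+# above turn Python values into their wire representations, for example:
+#
+#     _timedelta_as_isostr(timedelta(days=4, hours=12, minutes=30, seconds=5))
+#     # -> 'P4DT12H30M05S'
+#     _serialize_datetime(datetime(2024, 9, 25, tzinfo=timezone.utc))
+#     # -> '2024-09-25T00:00:00Z'
+#     _serialize_bytes(b'\x01\x02')
+#     # -> 'AQI='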
+ +class SdkJSONEncoder(JSONEncoder): + """A JSON encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. 
+ :param str attr: response string to be deserialized. + :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, 
key: str) -> None: + self._data.__delitem__(key) + + def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + 
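+# NOTE: illustrative usage sketch, not generated code. _serialize (above)
+# recursively maps containers and leaf values onto JSON-compatible types:
+#
+#     _serialize({"qty": decimal.Decimal("1.5"), "id": 7}, "str")
+#     # -> {'qty': 1.5, 'id': '7'}  (ints are rendered as strings under the "str" format)
+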
+class Model(_MyMutableMapping): + _is_model = True + # label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
+                raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'")
+            dict_to_pass.update(
+                {
+                    self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v)
+                    for k, v in kwargs.items()
+                    if v is not None
+                }
+            )
+        super().__init__(dict_to_pass)
+
+    def copy(self) -> "Model":
+        return Model(self.__dict__)
+
+    def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self:  # pylint: disable=unused-argument
+        if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated:
+            # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping',
+            # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object'
+            mros = cls.__mro__[:-9][::-1]  # ignore parents, and reverse the mro order
+            attr_to_rest_field: typing.Dict[str, _RestField] = {  # map attribute name to rest_field property
+                k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type")
+            }
+            annotations = {
+                k: v
+                for mro_class in mros
+                if hasattr(mro_class, "__annotations__")  # pylint: disable=no-member
+                for k, v in mro_class.__annotations__.items()  # pylint: disable=no-member
+            }
+            for attr, rf in attr_to_rest_field.items():
+                rf._module = cls.__module__
+                if not rf._type:
+                    rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None))
+                if not rf._rest_name_input:
+                    rf._rest_name_input = attr
+            cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items())
+            cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}")
+
+        return super().__new__(cls)  # pylint: disable=no-value-for-parameter
+
+    def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None:
+        for base in cls.__bases__:
+            if hasattr(base, "__mapping__"):  # pylint: disable=no-member
+                base.__mapping__[discriminator or cls.__name__] = cls  # type: ignore # pylint: disable=no-member
+
+    @classmethod
+    def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]:
+        for v in cls.__dict__.values():
+            if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators:
+                return v
+        return None
+
+    @classmethod
+    def _deserialize(cls, data, exist_discriminators):
+        if not hasattr(cls, "__mapping__"):  # pylint: disable=no-member
+            return cls(data)
+        discriminator = cls._get_discriminator(exist_discriminators)
+        if discriminator is None:
+            return cls(data)
+        exist_discriminators.append(discriminator._rest_name)
+        if isinstance(data, ET.Element):
+            model_meta = getattr(cls, "_xml", {})
+            prop_meta = getattr(discriminator, "_xml", {})
+            xml_name = prop_meta.get("name", discriminator._rest_name)
+            xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
+            if xml_ns:
+                xml_name = "{" + xml_ns + "}" + xml_name
+
+            if data.get(xml_name) is not None:
+                discriminator_value = data.get(xml_name)
+            else:
+                discriminator_value = data.find(xml_name).text  # pyright: ignore
+        else:
+            discriminator_value = data.get(discriminator._rest_name)
+        mapped_cls = cls.__mapping__.get(discriminator_value, cls)  # pyright: ignore # pylint: disable=no-member
+        return mapped_cls._deserialize(data, exist_discriminators)
+
+    def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]:
+        """Return a dict that can be serialized using json.dump.
+
+        :keyword bool exclude_readonly: Whether to remove the readonly properties.
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
+ if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering makes `string` the last deserialization option, because it is often the most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if
deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + 
"prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py new file mode 100644 index 000000000000..d3abe1b99b97 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -0,0 +1,90 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +import datetime +import logging +import base64 +import json +from typing import List, Tuple, Union +from azure.core.credentials import TokenCredential, AccessToken +from ._client import Client as ClientGenerated + +logger = logging.getLogger(__name__) + +# This is only done to rename the client. Can we do this in TypeSpec? 
+
+# This is only done to rename the client. Can we do this in TypeSpec?
+class AzureAIClient(ClientGenerated):
+    pass
+
+
+class SASTokenCredential(TokenCredential):
+    def __init__(
+        self,
+        *,
+        sas_token: str,
+        credential: TokenCredential,
+        subscription_id: str,
+        resource_group_name: str,
+        workspace_name: str,
+        connection_name: str,
+    ):
+        self._sas_token = sas_token
+        self._credential = credential
+        self._subscription_id = subscription_id
+        self._resource_group_name = resource_group_name
+        self._workspace_name = workspace_name
+        self._connection_name = connection_name
+        self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token)
+        logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on)
+
+    @classmethod
+    def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime:
+        # Decode the JWT payload (second dot-separated segment; no signature
+        # validation) and read its "exp" claim as a UTC datetime.
+        payload = jwt_token.split('.')[1]
+        padded_payload = payload + '=' * (-len(payload) % 4)  # Add padding only when the length is not a multiple of 4
+        decoded_bytes = base64.urlsafe_b64decode(padded_payload)
+        decoded_str = decoded_bytes.decode('utf-8')
+        decoded_payload = json.loads(decoded_str)
+        expiration_date = decoded_payload.get('exp')
+        return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc)
+
+    def _refresh_token(self) -> None:
+        logger.debug("[SASTokenCredential._refresh_token] Enter")
+        ai_client = ClientGenerated(
+            credential=self._credential,
+            subscription_id=self._subscription_id,
+            resource_group_name=self._resource_group_name,
+            workspace_name=self._workspace_name,
+        )
+
+        connection = ai_client.connections.get(
+            connection_name=self._connection_name,
+            populate_secrets=True,
+        )
+
+        self._sas_token = connection.properties.credentials.sas
+        self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token)
+        logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on)
+
+    def get_token(self, *scopes: str, **kwargs) -> AccessToken:
+        # Accept *scopes/**kwargs to match the azure-core TokenCredential
+        # protocol; the connection-scoped SAS token does not vary by scope.
+        logger.debug("[SASTokenCredential.get_token] Enter")
+        if self._expires_on < datetime.datetime.now(datetime.timezone.utc):
+            self._refresh_token()
+        return AccessToken(self._sas_token, int(self._expires_on.timestamp()))
+
+
+__all__: List[str] = [
+    "AzureAIClient",
+    "SASTokenCredential",
+]  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py
new file mode 100644
index 000000000000..01a226bd7f14
--- /dev/null
+++ b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py
@@ -0,0 +1,2115 @@
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. + :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. 
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # On Python 2.7, "fromstring" will scream if given a unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise DeserializationError("XML is invalid") from err
+        elif content_type.startswith("text/"):
+            return data_as_str
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any:
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+
+        :param bytes body_bytes: The body of the response.
+        :param dict headers: The headers of the response.
+        :returns: The deserialized data.
+        :rtype: object
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+
+class UTC(datetime.tzinfo):
+    """Time Zone info for handling UTC"""
+
+    def utcoffset(self, dt):
+        """UTC offset for UTC is 0.
+
+        :param datetime.datetime dt: The datetime
+        :returns: The offset
+        :rtype: datetime.timedelta
+        """
+        return datetime.timedelta(0)
+
+    def tzname(self, dt):
+        """Timestamp representation.
+
+        :param datetime.datetime dt: The datetime
+        :returns: The timestamp representation
+        :rtype: str
+        """
+        return "Z"
+
+    def dst(self, dt):
+        """No daylight saving for UTC.
+
+        :param datetime.datetime dt: The datetime
+        :returns: The daylight saving time
+        :rtype: datetime.timedelta
+        """
+        return datetime.timedelta(0)
+
+
+try:
+    from datetime import timezone as _FixedOffset  # type: ignore
+except ImportError:  # Python 2.7
+
+    class _FixedOffset(datetime.tzinfo):  # type: ignore
+        """Fixed offset in minutes east from UTC.
+        Copy/pasted from Python doc
+        :param datetime.timedelta offset: offset in timedelta format
+        """
+
+        def __init__(self, offset):
+            self.__offset = offset
+
+        def utcoffset(self, dt):
+            return self.__offset
+
+        def tzname(self, dt):
+            return str(self.__offset.total_seconds() / 3600)
+
+        def __repr__(self):
+            return "<FixedOffset {}>".format(self.tzname(None))
+
+        def dst(self, dt):
+            return datetime.timedelta(0)
+
+        def __getinitargs__(self):
+            return (self.__offset,)
+
+
+try:
+    from datetime import timezone
+
+    TZ_UTC = timezone.utc
+except ImportError:
+    TZ_UTC = UTC()  # type: ignore
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :returns: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model(object):
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
+
+        Advanced usage might optionally use a callback as parameter:
+
+        .. code:: python
+
+            def my_key_transformer(key, attr_desc, value):
+                return key
+
+        Key is the attribute name used in Python. Attr_desc
+        is a dict of metadata. Currently contains 'type' with the
+        msrest type and 'key' with the RestAPI encoded key.
+        Value is the current value in this object.
+
+        The string returned will be used to serialize the key.
+        If the return type is a list, this is considered a hierarchical
+        result dict.
+
+        See the three examples in this file:
+
+        - attribute_transformer
+        - full_restapi_key_transformer
+        - last_restapi_key_transformer
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :param function key_transformer: A key transformer function.
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs
+        )
+
+    @classmethod
+    def _infer_class_models(cls):
+        try:
+            str_models = cls.__module__.rsplit(".", 1)[0]
+            models = sys.modules[str_models]
+            client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+            if cls.__name__ not in client_models:
+                raise ValueError("Not Autorest generated code")
+        except Exception:  # pylint: disable=broad-exception-caught
+            # Assume it's not Autorest generated (tests?). Add ourselves as dependencies.
+            client_models = {cls.__name__: cls}
+        return client_models
+
+    @classmethod
+    def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType:
+        """Parse a str using the RestAPI syntax and return a model.
+
+        :param str data: A str using RestAPI structure. JSON by default.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises: DeserializationError if something went wrong
+        :rtype: ModelType
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def from_dict(
+        cls: Type[ModelType],
+        data: Any,
+        key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None,
+        content_type: Optional[str] = None,
+    ) -> ModelType:
+        """Parse a dict using the given key extractors and return a model.
+
+        By default, considers the key extractors
+        (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor
+        and last_rest_key_case_insensitive_extractor).
+
+        :param dict data: A dict using RestAPI structure
+        :param function key_extractors: A key extractor function.
+        :param str content_type: JSON by default, set application/xml if XML.
+        :returns: An instance of this model
+        :raises: DeserializationError if something went wrong
+        :rtype: ModelType
+        """
+        deserializer = Deserializer(cls._infer_class_models())
+        deserializer.key_extractors = (  # type: ignore
+            [  # type: ignore
+                attribute_key_case_insensitive_extractor,
+                rest_key_case_insensitive_extractor,
+                last_rest_key_case_insensitive_extractor,
+            ]
+            if key_extractors is None
+            else key_extractors
+        )
+        return deserializer(cls.__name__, data, content_type=content_type)  # type: ignore
+
+    @classmethod
+    def _flatten_subtype(cls, key, objects):
+        if "_subtype_map" not in cls.__dict__:
+            return {}
+        result = dict(cls._subtype_map[key])
+        for valuetype in cls._subtype_map[key].values():
+            result.update(objects[valuetype]._flatten_subtype(key, objects))  # pylint: disable=protected-access
+        return result
+
+    @classmethod
+    def _classify(cls, response, objects):
+        """Check the class _subtype_map for any child classes.
+        We want to ignore any inherited _subtype_maps.
+        Remove the polymorphic key from the initial data.
+
+        :param dict response: The initial data
+        :param dict objects: The class objects
+        :returns: The class to be used
+        :rtype: class
+        """
+        for subtype_key in cls.__dict__.get("_subtype_map", {}).keys():
+            subtype_value = None
+
+            if not isinstance(response, ET.Element):
+                rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1]
+                subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None)
+            else:
+                subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response)
+            if subtype_value:
+                # Try to match base class. Can be class name only
+                # (bug to fix in Autorest to support x-ms-discriminator-name)
+                if cls.__name__ == subtype_value:
+                    return cls
+                flatten_mapping_type = cls._flatten_subtype(subtype_key, objects)
+                try:
+                    return objects[flatten_mapping_type[subtype_value]]  # type: ignore
+                except KeyError:
+                    _LOGGER.warning(
+                        "Subtype value %s has no mapping, use base class %s.",
+                        subtype_value,
+                        cls.__name__,
+                    )
+                    break
+            else:
+                _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__)
+                break
+        return cls
+
+    @classmethod
+    def _get_rest_key_parts(cls, attr_key):
+        """Get the RestAPI key of this attr, split it and decode each part.
+        :param str attr_key: Attribute key must be in attribute_map.
+        :returns: A list of RestAPI parts
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decodes a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
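+
+    For example (illustrative): a flattened key like "properties.enabled" is split
+    by _FLATTEN on its unescaped dot, while an escaped part such as "odata\.type"
+    decodes here to "odata.type".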
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer(object): # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None): + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
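+                            # e.g. (illustrative) a child element tagged
+                            # "{http://example.com/ns}Child" is re-tagged to
+                            # "{http://example.com/ns}<xml_name>": the namespace
+                            # before "}" is kept, only the local tag is swapped.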
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builtin data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of the object.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is 'None'.
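+         For example (illustrative): with div=",", [1, 2, 3] serializes to "1,2,3";
+         with no div, the result stays a list.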
+ :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. 
+        :rtype: str
+        :return: serialized time
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_time(attr)
+        t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+        if attr.microsecond:
+            # Zero-pad microseconds to 6 digits so e.g. 500 renders as .000500, not .500
+            t += ".{:06}".format(attr.microsecond)
+        return t
+
+    @staticmethod
+    def serialize_duration(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize TimeDelta object into ISO-8601 formatted string.
+
+        :param TimeDelta attr: Object to be serialized.
+        :rtype: str
+        :return: serialized duration
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_duration(attr)
+        return isodate.duration_isoformat(attr)
+
+    @staticmethod
+    def serialize_rfc(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into RFC-1123 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises: TypeError if format invalid.
+        :return: serialized rfc
+        """
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+        except AttributeError as exc:
+            raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+        return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+            Serializer.days[utc.tm_wday],
+            utc.tm_mday,
+            Serializer.months[utc.tm_mon],
+            utc.tm_year,
+            utc.tm_hour,
+            utc.tm_min,
+            utc.tm_sec,
+        )
+
+    @staticmethod
+    def serialize_iso(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into ISO-8601 formatted string.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: str
+        :raises: SerializationError if format invalid.
+        :return: serialized iso
+        """
+        if isinstance(attr, str):
+            attr = isodate.parse_datetime(attr)
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            utc = attr.utctimetuple()
+            if utc.tm_year > 9999 or utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+
+            microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+            if microseconds:
+                microseconds = "." + microseconds
+            date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+                utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+            )
+            return date + microseconds + "Z"
+        except (ValueError, OverflowError) as err:
+            msg = "Unable to serialize datetime object."
+            raise SerializationError(msg) from err
+        except AttributeError as err:
+            msg = "ISO-8601 object must be valid Datetime object."
+            raise TypeError(msg) from err
+
+    @staticmethod
+    def serialize_unix(attr, **kwargs):  # pylint: disable=unused-argument
+        """Serialize Datetime object into IntTime format.
+        This is represented as seconds.
+
+        :param Datetime attr: Object to be serialized.
+        :rtype: int
+        :raises: SerializationError if format invalid
+        :return: serialized unix
+        """
+        if isinstance(attr, int):
+            return attr
+        try:
+            if not attr.tzinfo:
+                _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+            return int(calendar.timegm(attr.utctimetuple()))
+        except AttributeError as exc:
+            raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        # Need the cast, as for some reason "split" is typed as list[str | Any]
+        dict_keys = cast(List[str], _FLATTEN.split(key))
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = working_data.get(working_key, data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means that all properties under it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    return working_data.get(key)
+
+
+def rest_key_case_insensitive_extractor(  # pylint: disable=unused-argument, inconsistent-return-statements
+    attr, attr_desc, data
+):
+    key = attr_desc["key"]
+    working_data = data
+
+    while "." in key:
+        dict_keys = _FLATTEN.split(key)
+        if len(dict_keys) == 1:
+            key = _decode_attribute_map_key(dict_keys[0])
+            break
+        working_key = _decode_attribute_map_key(dict_keys[0])
+        working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+        if working_data is None:
+            # If at any point while following the flattened JSON path we see None,
+            # it means that all properties under it are None as well
+            return None
+        key = ".".join(dict_keys[1:])
+
+    if working_data:
+        return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument
+    """Extract the attribute in "data" based on the last part of the JSON path key.
+
+    This is the case insensitive version of "last_rest_key_extractor".
+    :param str attr: The attribute to extract
+    :param dict attr_desc: The attribute description
+    :param dict data: The data to extract from
+    :rtype: object
+    :returns: The extracted attribute
+    """
+    key = attr_desc["key"]
+    dict_keys = _FLATTEN.split(key)
+    return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+    return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+    found_key = None
+    lower_attr = attr.lower()
+    for key in data:
+        if lower_attr == key.lower():
+            found_key = key
+            break
+
+    return data.get(found_key)
+
+
+def _extract_name_from_internal_type(internal_type):
+    """Given an internal type XML description, extract correct XML name with namespace.
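+
+    For example (illustrative): a model whose _xml_map is
+    {"name": "Pet", "ns": "http://example.com/art"} yields "{http://example.com/art}Pet".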
+ + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not a itertype, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, type]] = None):
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, type] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional properties only works if the "rest_key_extractor" is used to
+        # extract the keys. Making it work whatever the key extractor is would be
+        # too complicated, with no real scenario for now.
+        # So adding a flag to disable additional properties detection. This flag should be
+        # used if you expect the deserialization to NOT come from a JSON REST syntax.
+        # Otherwise, results are unexpected
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises: DeserializationError if deserialization fails.
+        :return: Deserialized object.
+        :rtype: object
+        """
+        data = self._unpack_content(response_data, content_type)
+        return self._deserialize(target_obj, data)
+
+    def _deserialize(self, target_obj, data):  # pylint: disable=inconsistent-return-statements
+        """Call the deserializer on a model.
+
+        Data needs to be already deserialized as JSON or XML ElementTree
+
+        :param str target_obj: Target data type to deserialize to.
+        :param object data: Object to deserialize.
+        :raises: DeserializationError if deserialization fails.
+        :return: Deserialized object.
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access + ] + const = [ + k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. 
+ :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. 
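+
+ A minimal illustration (``Color`` is a hypothetical enum; matching falls back
+ to case-insensitive comparison on values, as implemented below):
+
+ >>> class Color(Enum):
+ ... RED = "red"
+ >>> Deserializer.deserialize_enum("RED", Color)
+ <Color.RED: 'red'>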
+ + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :return: Deserialized enum object. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + try: + return list(enum_obj.__members__.values())[data] + except IndexError as exc: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) from exc + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :return: Deserialized bytearray + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :return: Deserialized base64 string + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :return: Deserialized decimal + :raises: DeserializationError if string format invalid. + :rtype: decimal + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(str(attr)) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise DeserializationError(msg) from err + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :return: Deserialized int + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :return: Deserialized duration + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise DeserializationError(msg) from err + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. 
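+
+ For example (illustrative value):
+
+ >>> Deserializer.deserialize_date("2024-07-01").isoformat()
+ '2024-07-01'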
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized date
+ :rtype: Date
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+ # This must NOT use defaultmonth/defaultday. Using 0 for both ensures an exception is raised on partial dates.
+ return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+ @staticmethod
+ def deserialize_time(attr):
+ """Deserialize ISO-8601 formatted string into time object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized time
+ :rtype: datetime.time
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Time must have only digits and separators. Received: %s" % attr)
+ return isodate.parse_time(attr)
+
+ @staticmethod
+ def deserialize_rfc(attr):
+ """Deserialize RFC-1123 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
+ :rtype: Datetime
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ parsed_date = email.utils.parsedate_tz(attr) # type: ignore
+ date_obj = datetime.datetime(
+ *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+ )
+ if not date_obj.tzinfo:
+ date_obj = date_obj.astimezone(tz=TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to rfc datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_iso(attr):
+ """Deserialize ISO-8601 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized ISO datetime
+ :rtype: Datetime
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ attr = attr.upper() # type: ignore
+ match = Deserializer.valid_date.match(attr)
+ if not match:
+ raise ValueError("Invalid datetime string: " + attr)
+
+ check_decimal = attr.split(".")
+ if len(check_decimal) > 1:
+ decimal_str = ""
+ for digit in check_decimal[1]:
+ if digit.isdigit():
+ decimal_str += digit
+ else:
+ break
+ if len(decimal_str) > 6:
+ attr = attr.replace(decimal_str, decimal_str[0:6])
+
+ date_obj = isodate.parse_datetime(attr)
+ test_utc = date_obj.utctimetuple()
+ if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+ except (ValueError, OverflowError, AttributeError) as err:
+ msg = "Cannot deserialize datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_unix(attr):
+ """Deserialize a unix timestamp (seconds since the epoch) into a Datetime object.
+
+ :param int attr: Unix timestamp (in seconds) to be deserialized.
+ :return: Deserialized datetime
+ :rtype: Datetime
+ :raises: DeserializationError if format invalid
+ """
+ if isinstance(attr, ET.Element):
+ attr = int(attr.text) # type: ignore
+ try:
+ attr = int(attr)
+ date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to unix datetime object."
+ raise DeserializationError(msg) from err + return date_obj diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_version.py b/sdk/ai/azure-ai-client/azure/ai/client/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py new file mode 100644 index 000000000000..80bfbb6d392d --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._client import Client + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Client", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py new file mode 100644 index 000000000000..1548ec7b0de3 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -0,0 +1,131 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING +from typing_extensions import Self + +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._serialization import Deserializer, Serializer +from ._configuration import ClientConfiguration +from .operations import AssistantsOperations, EndpointsOperations, EvaluationsOperations + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class Client: # pylint: disable=client-accepts-api-version-keyword + """Client. 
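+
+ A minimal async usage sketch (the resource names are placeholders, and
+ ``DefaultAzureCredential`` is just one possible ``AsyncTokenCredential``):
+
+ >>> from azure.identity.aio import DefaultAzureCredential
+ >>> async with Client(
+ ... subscription_id="<subscription-id>",
+ ... resource_group_name="<resource-group>",
+ ... workspace_name="<workspace-name>",
+ ... credential=DefaultAzureCredential(),
+ ... ) as client:
+ ... connections = await client.endpoints.list()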
+ + :ivar endpoints: EndpointsOperations operations + :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations + :ivar assistants: AssistantsOperations operations + :vartype assistants: azure.ai.client.aio.operations.AssistantsOperations + :ivar evaluations: EvaluationsOperations operations + :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param resource_group_name: The name of the Resource Group. Required. + :type resource_group_name: str + :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :type workspace_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + self._config = ClientConfiguration( + subscription_id=subscription_id, + resource_group_name=resource_group_name, + workspace_name=workspace_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) + self.assistants = AssistantsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. 
Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py new file mode 100644 index 000000000000..de195b03fcda --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for Client. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param subscription_id: The ID of the target subscription. Required. + :type subscription_id: str + :param resource_group_name: The name of the Resource Group. Required. + :type resource_group_name: str + :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :type workspace_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior. 
+ :paramtype api_version: str + """ + + def __init__( + self, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if workspace_name is None: + raise ValueError("Parameter 'workspace_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.workspace_name = workspace_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-client/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py new file mode 100644 index 000000000000..6e2dd3e8d726 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import EndpointsOperations +from ._operations import AssistantsOperations +from ._operations import EvaluationsOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "EndpointsOperations", + "AssistantsOperations", + "EvaluationsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py new file mode 100644 index 000000000000..29476aacc7b8 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -0,0 +1,348 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ...operations._operations import build_endpoints_list_request, build_endpoints_list_secrets_request + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class EndpointsOperations: + """ + .. 
warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.aio.Client`'s + :attr:`endpoints` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_endpoints_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ConnectionsListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def list_secrets( + self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def list_secrets( + self, + connection_name_in_url: str, + *, + connection_name: str, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + api_version_in_body: str, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :keyword connection_name: Connection Name (should be the same as the connection name in the URL + path). Required. + :paramtype connection_name: str + :keyword subscription_id: The ID of the target subscription. Required. + :paramtype subscription_id: str + :keyword resource_group_name: The name of the Resource Group. Required. + :paramtype resource_group_name: str + :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :paramtype workspace_name: str + :keyword api_version_in_body: The api version. Required. + :paramtype api_version_in_body: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def list_secrets( + self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def list_secrets( + self, + connection_name_in_url: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + connection_name: str = _Unset, + subscription_id: str = _Unset, + resource_group_name: str = _Unset, + workspace_name: str = _Unset, + api_version_in_body: str = _Unset, + **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword connection_name: Connection Name (should be the same as the connection name in the URL + path). Required. + :paramtype connection_name: str + :keyword subscription_id: The ID of the target subscription. Required. + :paramtype subscription_id: str + :keyword resource_group_name: The name of the Resource Group. Required. 
+ :paramtype resource_group_name: str + :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :paramtype workspace_name: str + :keyword api_version_in_body: The api version. Required. + :paramtype api_version_in_body: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + if body is _Unset: + if connection_name is _Unset: + raise TypeError("missing required argument: connection_name") + if subscription_id is _Unset: + raise TypeError("missing required argument: subscription_id") + if resource_group_name is _Unset: + raise TypeError("missing required argument: resource_group_name") + if workspace_name is _Unset: + raise TypeError("missing required argument: workspace_name") + if api_version_in_body is _Unset: + raise TypeError("missing required argument: api_version_in_body") + body = { + "apiVersionInBody": api_version_in_body, + "connectionName": connection_name, + "resourceGroupName": resource_group_name, + "subscriptionId": subscription_id, + "workspaceName": workspace_name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_endpoints_list_secrets_request( + connection_name_in_url=connection_name_in_url, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ConnectionsListSecretsResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: 
ignore + + return deserialized # type: ignore + + +class AssistantsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.aio.Client`'s + :attr:`assistants` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + +class EvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.aio.Client`'s + :attr:`evaluations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py new file mode 100644 index 000000000000..9c5a659cca98 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+
+from ._models import ConnectionProperties
+from ._models import ConnectionPropertiesAADAuth
+from ._models import ConnectionPropertiesApiKeyAuth
+from ._models import ConnectionPropertiesSASAuth
+from ._models import ConnectionsListResponse
+from ._models import ConnectionsListSecretsResponse
+from ._models import CredentialsApiKeyAuth
+from ._models import CredentialsSASAuth
+
+from ._enums import AuthenticationType
+from ._enums import ConnectionType
+from ._patch import __all__ as _patch_all
+from ._patch import * # pylint: disable=unused-wildcard-import
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "ConnectionProperties",
+ "ConnectionPropertiesAADAuth",
+ "ConnectionPropertiesApiKeyAuth",
+ "ConnectionPropertiesSASAuth",
+ "ConnectionsListResponse",
+ "ConnectionsListSecretsResponse",
+ "CredentialsApiKeyAuth",
+ "CredentialsSASAuth",
+ "AuthenticationType",
+ "ConnectionType",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py
new file mode 100644
index 000000000000..0ba289a9f795
--- /dev/null
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py
@@ -0,0 +1,30 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum
+from azure.core import CaseInsensitiveEnumMeta
+
+
+class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Authentication type of the connection target."""
+
+ API_KEY = "ApiKey"
+ """API Key authentication"""
+ AAD = "AAD"
+ """Entra ID authentication"""
+ SAS = "SAS"
+ """Shared Access Signature (SAS) authentication"""
+
+
+class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The type (or category) of the connection."""
+
+ AZURE_OPEN_AI = "AzureOpenAI"
+ """Azure OpenAI"""
+ SERVERLESS = "Serverless"
+ """Serverless API"""
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py
new file mode 100644
index 000000000000..224cf16d8e16
--- /dev/null
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py
@@ -0,0 +1,305 @@
+# coding=utf-8
+# pylint: disable=too-many-lines
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Dict, List, Literal, Mapping, TYPE_CHECKING, Union, overload
+
+from .. import _model_base
+from .._model_base import rest_discriminator, rest_field
+from ._enums import AuthenticationType
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from .. import models as _models
+
+
+class ConnectionProperties(_model_base.Model):
+ """Connection properties.
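+
+ Callers typically branch on the concrete sub-class after deserialization;
+ for example (illustrative, assuming ``conn`` is a deserialized
+ ``ConnectionsListSecretsResponse``):
+
+ >>> if isinstance(conn.properties, ConnectionPropertiesApiKeyAuth):
+ ... api_key = conn.properties.credentials.key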
+ + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth + + + :ivar auth_type: Authentication type of the connection target. Required. Known values are: + "ApiKey", "AAD", and "SAS". + :vartype auth_type: str or ~azure.ai.client.models.AuthenticationType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + auth_type: str = rest_discriminator(name="authType") + """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", + and \"SAS\".""" + + @overload + def __init__( + self, + *, + auth_type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): + """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ + ). + + + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.client.models.AAD + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and + "Serverless". + :vartype category: str or ~azure.ai.client.models.ConnectionType + :ivar target: to do. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Entra ID authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" + target: str = rest_field() + """to do. Required.""" + + @overload + def __init__( + self, + *, + category: Union[str, "_models.ConnectionType"], + target: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, auth_type=AuthenticationType.AAD, **kwargs) + + +class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): + """Connection properties for connections with API key authentication. + + + :ivar auth_type: Authentication type of the connection target. Required. API Key authentication + :vartype auth_type: str or ~azure.ai.client.models.API_KEY + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and + "Serverless". + :vartype category: str or ~azure.ai.client.models.ConnectionType + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.client.models.CredentialsApiKeyAuth + :ivar target: to do. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. API Key authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. 
Known values are: \"AzureOpenAI\" and \"Serverless\"."""
+ credentials: "_models.CredentialsApiKeyAuth" = rest_field()
+ """Credentials will only be present for authType=ApiKey. Required."""
+ target: str = rest_field()
+ """to do. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ category: Union[str, "_models.ConnectionType"],
+ credentials: "_models.CredentialsApiKeyAuth",
+ target: str,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, auth_type=AuthenticationType.API_KEY, **kwargs)
+
+
+class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"):
+ """Connection properties for connections with SAS authentication.
+
+
+ :ivar auth_type: Authentication type of the connection target. Required. Shared Access
+ Signature (SAS) authentication
+ :vartype auth_type: str or ~azure.ai.client.models.SAS
+ :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and
+ "Serverless".
+ :vartype category: str or ~azure.ai.client.models.ConnectionType
+ :ivar credentials: Credentials will only be present for authType=SAS. Required.
+ :vartype credentials: ~azure.ai.client.models.CredentialsSASAuth
+ :ivar target: to do. Required.
+ :vartype target: str
+ """
+
+ auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore
+ """Authentication type of the connection target. Required. Shared Access Signature (SAS)
+ authentication"""
+ category: Union[str, "_models.ConnectionType"] = rest_field()
+ """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\"."""
+ credentials: "_models.CredentialsSASAuth" = rest_field()
+ """Credentials will only be present for authType=SAS. Required."""
+ target: str = rest_field()
+ """to do. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ category: Union[str, "_models.ConnectionType"],
+ credentials: "_models.CredentialsSASAuth",
+ target: str,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, auth_type=AuthenticationType.SAS, **kwargs)
+
+
+class ConnectionsListResponse(_model_base.Model):
+ """Response from the ``list`` operation, containing the details of all connections.
+
+
+ :ivar value: The list of connections. Required.
+ :vartype value: list[~azure.ai.client.models.ConnectionsListSecretsResponse]
+ """
+
+ value: List["_models.ConnectionsListSecretsResponse"] = rest_field()
+ """The list of connections. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ value: List["_models.ConnectionsListSecretsResponse"],
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class ConnectionsListSecretsResponse(_model_base.Model):
+ """A connection resource, as returned by the ``list`` and ``list_secrets`` operations.
+
+
+ :ivar name: The name of the resource. Required.
+ :vartype name: str
+ :ivar properties: The properties of the resource. Required.
+ :vartype properties: ~azure.ai.client.models.ConnectionProperties
+ """
+
+ name: str = rest_field()
+ """The name of the resource. Required."""
+ properties: "_models.ConnectionProperties" = rest_field()
+ """The properties of the resource. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ properties: "_models.ConnectionProperties",
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class CredentialsApiKeyAuth(_model_base.Model):
+ """API key credentials.
+
+
+ :ivar key: The API key. Required.
+ :vartype key: str
+ """
+
+ key: str = rest_field()
+ """The API key. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ key: str,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
+
+
+class CredentialsSASAuth(_model_base.Model):
+ """Shared Access Signature (SAS) credentials.
+
+
+ :ivar sas: The SAS token. Required.
+ :vartype sas: str
+ """
+
+ sas: str = rest_field(name="SAS")
+ """The SAS token. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ sas: str,
+ ): ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]):
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
+ super().__init__(*args, **kwargs)
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
new file mode 100644
index 000000000000..f7dd32510333
--- /dev/null
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List
+
+__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py
new file mode 100644
index 000000000000..6e2dd3e8d726
--- /dev/null
+++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- + +from ._operations import EndpointsOperations +from ._operations import AssistantsOperations +from ._operations import EvaluationsOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "EndpointsOperations", + "AssistantsOperations", + "EvaluationsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py new file mode 100644 index 000000000000..0bb895c97d2a --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -0,0 +1,397 @@ +# pylint: disable=too-many-lines,too-many-statements +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionNameInUrl}/listsecrets" + path_format_arguments = { + "connectionNameInUrl": _SERIALIZER.url("connection_name_in_url", connection_name_in_url, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +class EndpointsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.Client`'s + :attr:`endpoints` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :return: ConnectionsListResponse. 
The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_endpoints_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ConnectionsListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def list_secrets( + self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def list_secrets( + self, + connection_name_in_url: str, + *, + connection_name: str, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + api_version_in_body: str, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :keyword connection_name: Connection Name (should be the same as the connection name in the URL + path). Required. + :paramtype connection_name: str + :keyword subscription_id: The ID of the target subscription. Required. 
+ :paramtype subscription_id: str + :keyword resource_group_name: The name of the Resource Group. Required. + :paramtype resource_group_name: str + :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :paramtype workspace_name: str + :keyword api_version_in_body: The api version. Required. + :paramtype api_version_in_body: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def list_secrets( + self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def list_secrets( + self, + connection_name_in_url: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + connection_name: str = _Unset, + subscription_id: str = _Unset, + resource_group_name: str = _Unset, + workspace_name: str = _Unset, + api_version_in_body: str = _Unset, + **kwargs: Any + ) -> _models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credential (if available). + + :param connection_name_in_url: Connection Name. Required. + :type connection_name_in_url: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword connection_name: Connection Name (should be the same as the connection name in the URL + path). Required. + :paramtype connection_name: str + :keyword subscription_id: The ID of the target subscription. Required. + :paramtype subscription_id: str + :keyword resource_group_name: The name of the Resource Group. Required. + :paramtype resource_group_name: str + :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :paramtype workspace_name: str + :keyword api_version_in_body: The api version. Required. + :paramtype api_version_in_body: str + :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + if body is _Unset: + if connection_name is _Unset: + raise TypeError("missing required argument: connection_name") + if subscription_id is _Unset: + raise TypeError("missing required argument: subscription_id") + if resource_group_name is _Unset: + raise TypeError("missing required argument: resource_group_name") + if workspace_name is _Unset: + raise TypeError("missing required argument: workspace_name") + if api_version_in_body is _Unset: + raise TypeError("missing required argument: api_version_in_body") + body = { + "apiVersionInBody": api_version_in_body, + "connectionName": connection_name, + "resourceGroupName": resource_group_name, + "subscriptionId": subscription_id, + "workspaceName": workspace_name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_endpoints_list_secrets_request( + connection_name_in_url=connection_name_in_url, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ConnectionsListSecretsResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class AssistantsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.Client`'s + :attr:`assistants` attribute. 
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + +class EvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.Client`'s + :attr:`evaluations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py new file mode 100644 index 000000000000..c8d361f1024f --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -0,0 +1,94 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List, Iterable +#from zoneinfo import ZoneInfo +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated +from ..models._enums import AuthenticationType, ConnectionType +from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse + +class ConnectionsOperations(ConnectionsOperationsGenerated): + + def get( + self, + *, + connection_name: str, + populate_secrets: bool = False + ) -> ConnectionsListSecretsResponse: + if not connection_name: + raise ValueError("connection_name cannot be empty") + if populate_secrets: + connection: ConnectionsListSecretsResponse = self._list_secrets( + connection_name_in_url=connection_name, + connection_name=connection_name, + subscription_id=self._config.subscription_id, + resource_group_name=self._config.resource_group_name, + workspace_name=self._config.workspace_name, + api_version_in_body=self._config.api_version, + ) + if connection.properties.auth_type == AuthenticationType.AAD: + connection.properties.token_credential = self._config.credential + return connection + elif connection.properties.auth_type == AuthenticationType.SAS: + from .._patch import SASTokenCredential + connection.properties.token_credentials = SASTokenCredential( + sas_token=connection.properties.credentials.sas, + credential=self._config.credential, + subscription_id=self._config.subscription_id, + resource_group_name=self._config.resource_group_name, + workspace_name=self._config.workspace_name, + connection_name=connection_name) + return connection + + return connection + else: + internal_response: ConnectionsListResponse = self._list() + for connection in internal_response.value: + if connection_name == connection.name: + return connection + return None + + def list( + self, + *, + connection_type: ConnectionType | None = None, + populate_secrets: bool = False + ) -> Iterable[ConnectionsListSecretsResponse]: + # First make a REST call to /list to get 
all the connections + internal_response: ConnectionsListResponse = self._list() + filtered_connections: List[ConnectionsListSecretsResponse] = [] + # Filter by connection type + for connection in internal_response.value: + if connection_type is None or connection.properties.category == connection_type: + filtered_connections.append(connection) + if not populate_secrets: + # If no secrets are needed, we are done. Return filtered list. + return filtered_connections + else: + # If secrets are needed, for each connection in the list, we now + # need to make a /listSecrets rest call to get the connection with secrets + filtered_connections_with_secrets: List[ConnectionsListSecretsResponse] = [] + for connection in filtered_connections: + filtered_connections_with_secrets.append( + self.get(connection_name=connection.name, populate_secrets=True) + ) + return filtered_connections_with_secrets + + +__all__: List[str] = [ + "ConnectionsOperations" +] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/py.typed b/sdk/ai/azure-ai-client/azure/ai/client/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/dev_requirements.txt b/sdk/ai/azure-ai-client/dev_requirements.txt new file mode 100644 index 000000000000..c82827bb56f4 --- /dev/null +++ b/sdk/ai/azure-ai-client/dev_requirements.txt @@ -0,0 +1,4 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +../../identity/azure-identity +aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/generated_tests/conftest.py b/sdk/ai/azure-ai-client/generated_tests/conftest.py new file mode 100644 index 000000000000..a308f4a37a08 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/conftest.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import os +import pytest +from dotenv import load_dotenv +from devtools_testutils import ( + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, + add_header_regex_sanitizer, +) + +load_dotenv() + + +# For security, please avoid record sensitive identity information in recordings +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + _subscription_id = os.environ.get("_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + _tenant_id = os.environ.get("_TENANT_ID", "00000000-0000-0000-0000-000000000000") + _client_id = os.environ.get("_CLIENT_ID", "00000000-0000-0000-0000-000000000000") + _client_secret = os.environ.get("_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=_subscription_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=_client_secret, value="00000000-0000-0000-0000-000000000000") + + add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") + add_header_regex_sanitizer(key="Cookie", value="cookie;") + add_body_key_sanitizer(json_path="$..access_token", value="access_token") diff --git a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py new file mode 100644 index 000000000000..b3452e1889bf --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import ClientTestBase, Preparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestEndpointsOperations(ClientTestBase): + @Preparer() + @recorded_by_proxy + def test_endpoints_list(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.endpoints.list() + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_endpoints_list_secrets(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.endpoints.list_secrets( + connection_name_in_url="str", + body={ + "apiVersionInBody": "str", + "connectionName": "str", + "resourceGroupName": "str", + "subscriptionId": "str", + "workspaceName": "str", + }, + connection_name="str", + subscription_id="str", + resource_group_name="str", + workspace_name="str", + api_version_in_body="str", + ) + + # please add some check logic here by yourself + # ... 
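The skipped template test above passes both the prepared JSON body and the equivalent keyword arguments at once purely as generated scaffolding; at runtime you would pick one form or the other. A minimal sketch of the keyword-only form, assuming `client` is an already-constructed `azure.ai.client.Client` and using placeholder values throughout:

# Sketch only: all values below are placeholders.
response = client.endpoints.list_secrets(
    connection_name_in_url="my-connection",
    connection_name="my-connection",  # should match the name used in the URL path
    subscription_id="00000000-0000-0000-0000-000000000000",
    resource_group_name="my-resource-group",
    workspace_name="my-workspace",
    api_version_in_body="2024-07-01-preview",
)
print(response.name, response.properties.auth_type)

When no `body` is given, the operation assembles the JSON payload from these keywords itself, so the two overloads produce the same request.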
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py new file mode 100644 index 000000000000..3bcdb63affc8 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import Preparer +from testpreparer_async import ClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestEndpointsOperationsAsync(ClientTestBaseAsync): + @Preparer() + @recorded_by_proxy_async + async def test_endpoints_list(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.endpoints.list() + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_endpoints_list_secrets(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.endpoints.list_secrets( + connection_name_in_url="str", + body={ + "apiVersionInBody": "str", + "connectionName": "str", + "resourceGroupName": "str", + "subscriptionId": "str", + "workspaceName": "str", + }, + connection_name="str", + subscription_id="str", + resource_group_name="str", + workspace_name="str", + api_version_in_body="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer.py new file mode 100644 index 000000000000..07f80c19c778 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/testpreparer.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from azure.ai.client import Client +from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer +import functools + + +class ClientTestBase(AzureRecordedTestCase): + + def create_client(self, endpoint): + credential = self.get_credential(Client) + return self.create_client_from_credential( + Client, + credential=credential, + endpoint=endpoint, + ) + + +Preparer = functools.partial(PowerShellPreparer, "", _endpoint="https://fake__endpoint.com") diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py new file mode 100644 index 000000000000..78bccaa8d731 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from azure.ai.client.aio import Client +from devtools_testutils import AzureRecordedTestCase + + +class ClientTestBaseAsync(AzureRecordedTestCase): + + def create_async_client(self, endpoint): + credential = self.get_credential(Client, is_async=True) + return self.create_client_from_credential( + Client, + credential=credential, + endpoint=endpoint, + ) diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py new file mode 100644 index 000000000000..f710aade9344 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -0,0 +1,134 @@ +import sys +import logging +logger = logging.getLogger("azure") +logger.setLevel(logging.DEBUG) +logger.addHandler(logging.StreamHandler(stream=sys.stdout)) + +import os +from azure.ai.client import AzureAIClient +from azure.ai.client.models import ConnectionType, AuthenticationType +from openai import AzureOpenAI +from azure.ai.inference import ChatCompletionsClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.core.credentials import AzureKeyCredential + +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + subscription_id=os.environ["AZURE_SUBSCRIPTION"], + resource_group_name=os.environ["AZURE_RESOURCE_GROUP"], + workspace_name=os.environ["AI_STUDIO_HUB"], + logging_enable=True, +) + +# You can list all connections, with or without their credentials +endpoints = ai_client.endpoints.list( + connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True # Optional. 
Defaults to "False" +) + +# Or you can get properties of a single connection +endpoint = ai_client.endpoints.get( + connection_name=os.environ["AI_STUDIO_CONNECTION_2"], + populate_secrets=True +) + +""" +# Remove trailing slash from the endpoint if exist +connection.properties.target = ( + connection.properties.target[:-1] if connection.properties.target.endswith("/") else connection.properties.target +) +print(json.dumps(connection.as_dict(), indent=2)) + + +print(connection) +print(json.dumps(connection.as_dict(), indent=2)) +exit() + +connection = ai_client.connections.get( + connection_name=os.environ["AI_STUDIO_CONNECTION_3"], + populate_secrets=False +) +print(connection) +print(json.dumps(connection.as_dict(), indent=2)) + + +connections = ai_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, # Optional + populate_secrets=True # Optional. Defaults to "False" +) +for connection in connections: + print(json.dumps(connection.as_dict(), indent=2)) + +exit() +""" + +# Here is how you would create the appropriate AOAI or Inference SDK for these endpoint +if endpoint.properties.category == ConnectionType.AZURE_OPEN_AI: + + if endpoint.properties.auth_type == AuthenticationType.API_KEY: + print("====> Creating AzureOpenAI client using API key authentication") + client = AzureOpenAI( + api_key=endpoint.properties.credentials.key, + azure_endpoint=endpoint.properties.target, + api_version="2024-08-01-preview", # TODO: Is this needed? + ) + elif endpoint.properties.auth_type == AuthenticationType.AAD: + print("====> Creating AzureOpenAI client using Entra ID authentication") + client = AzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider(endpoint.properties.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.properties.target, + api_version="2024-08-01-preview", + ) + elif endpoint.properties.auth_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + print("====> Creating AzureOpenAI client using SAS authentication") + client = AzureOpenAI( + azure_ad_token_provider=get_bearer_token_provider(endpoint.properties.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.properties.target, + api_version="2024-08-01-preview", + ) + + response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) + +elif endpoint.properties.category == ConnectionType.SERVERLESS: + + if endpoint.properties.auth_type == AuthenticationType.API_KEY: + print("====> Creating ChatCompletionsClient using API key authentication") + client = ChatCompletionsClient( + endpoint=endpoint.properties.target, + credential=AzureKeyCredential(endpoint.properties.credentials.key) + ) + elif endpoint.properties.auth_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + print("====> Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient( + endpoint=endpoint.properties.target, + credential=endpoint.properties.token_credential + ) + elif endpoint.properties.auth_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. 
+ print("====> Creating ChatCompletionsClient using SAS authentication") + client = ChatCompletionsClient( + endpoint=endpoint.properties.target, + credential=endpoint.properties.token_credential + ) + + response = client.complete( + messages=[ + UserMessage(content="How many feet are in a mile?") + ] + ) + + print(response.choices[0].message.content) \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/setup.py b/sdk/ai/azure-ai-client/setup.py new file mode 100644 index 000000000000..ca2aa3c55e1f --- /dev/null +++ b/sdk/ai/azure-ai-client/setup.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# coding: utf-8 + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-client" +PACKAGE_PPRINT_NAME = "Azure Ai Client" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace("-", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.client": ["py.typed"], + }, + install_requires=[ + "isodate>=0.6.1", + "azure-core>=1.30.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.8", +) diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py new file mode 100644 index 000000000000..ddb532514155 --- /dev/null +++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py @@ -0,0 +1,114 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import sys
+import logging
+import datetime
+from azure.ai.client import SASTokenCredential
+from azure.core.credentials import TokenCredential, AccessToken
+from azure.core.exceptions import HttpResponseError
+
+#import azure.ai.client as sdk
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+class FakeTokenCredential(TokenCredential):
+    def get_token(self, *scopes, **kwargs):
+        # Create a fake token with an expiration time
+        token = "fake_token"
+        expires_on = datetime.datetime.now() + datetime.timedelta(hours=1)
+        return AccessToken(token, expires_on.timestamp())
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestUnit:
+
+    # **********************************************************************************
+    #
+    # UNIT TESTS
+    #
+    # **********************************************************************************
+
+    def test_sas_token_credential_class_mocked(self, **kwargs):
+        import jwt
+        import datetime
+        import time
+
+        # Create a simple JWT with a 5 second expiration time
+        token_duration_sec = 5
+        secret_key = "my_secret_key"
+        sas_token_expiration: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + \
+            datetime.timedelta(seconds=token_duration_sec)
+        sas_token_expiration = sas_token_expiration.replace(microsecond=0)
+        payload = {
+            'exp': sas_token_expiration
+        }
+        sas_token = jwt.encode(payload, secret_key)
+
+        # You can parse the token string on https://jwt.ms/. The "exp" value there is the
+        # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC).
+        # See https://www.epochconverter.com/ to convert Unix time to a readable date & time.
+        # The base64-decoded string will look something like this:
+        # {
+        #   "alg": "HS256",
+        #   "typ": "JWT"
+        # }.{
+        #   "exp": 1727208894
+        # }.[Signature]
+        print(f"Generated JWT token: {sas_token}")
+
+        sas_token_credential = SASTokenCredential(
+            sas_token = sas_token,
+            credential = FakeTokenCredential(),
+            subscription_id = "fake_subscription_id",
+            resource_group_name = "fake_resource_group",
+            workspace_name = "fake_workspace_name",
+            connection_name = "fake_connection_name"
+        )
+        assert sas_token_credential._expires_on == sas_token_expiration
+
+        exception_caught = False
+        try:
+            for _ in range(token_duration_sec + 2):
+                print("Looping...")
+                time.sleep(1)
+                access_token = sas_token_credential.get_token()
+        except HttpResponseError as e:
+            exception_caught = True
+            print(e)
+        assert exception_caught
+
+
+    # Unit tests for the SASTokenCredential class
+    def test_sas_token_credential_class_real(self, **kwargs):
+
+        # Example of a real SAS token for the AOAI service. You can parse it on https://jwt.ms/.
The "exp" value there is the + # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC) + token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ" + expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. See https://www.epochconverter.com/ to convert to date & time + expiration_datatime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc) + print(f"\n[TEST] Expected expiration date: {expiration_datatime_utc}") + + sas_token_credential = SASTokenCredential( + sas_token = token, + credential = None, + subscription_id = None, + resource_group_name = None, + workspace_name = None, + connection_name = None + ) + + print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") + assert sas_token_credential._expires_on == expiration_datatime_utc diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml new file mode 100644 index 000000000000..17c143ca684b --- /dev/null +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/Azure.AI.Client +commit: fa158c4ec75da8d4c5abb2aa3ca503372b465815 +repo: Azure/azure-rest-api-specs +additionalDirectories: From 5edce71e734ca5ec14447d6cc5ebf15c0309373b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 25 Sep 2024 15:27:06 -0700 Subject: [PATCH 002/138] Make Endpoints sample work --- .../azure/ai/client/aio/operations/_operations.py | 10 +++++----- .../azure/ai/client/operations/_operations.py | 10 +++++----- .../azure/ai/client/operations/_patch.py | 6 +++--- .../samples/endpoints/sample_endpoints.py | 6 +++++- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index 29476aacc7b8..abf0d2f6ac35 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -58,7 +58,7 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: """List the details of all the connections (not including their credentials). :return: ConnectionsListResponse. 
The ConnectionsListResponse is compatible with MutableMapping @@ -119,7 +119,7 @@ async def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: return deserialized # type: ignore @overload - async def list_secrets( + async def _list_secrets( self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). @@ -138,7 +138,7 @@ async def list_secrets( """ @overload - async def list_secrets( + async def _list_secrets( self, connection_name_in_url: str, *, @@ -175,7 +175,7 @@ async def list_secrets( """ @overload - async def list_secrets( + async def _list_secrets( self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). @@ -194,7 +194,7 @@ async def list_secrets( """ @distributed_trace_async - async def list_secrets( + async def _list_secrets( self, connection_name_in_url: str, body: Union[JSON, IO[bytes]] = _Unset, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 0bb895c97d2a..876bc9a9289f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -107,7 +107,7 @@ def __init__(self, *args, **kwargs): self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: """List the details of all the connections (not including their credentials). :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping @@ -168,7 +168,7 @@ def list(self, **kwargs: Any) -> _models.ConnectionsListResponse: return deserialized # type: ignore @overload - def list_secrets( + def _list_secrets( self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). @@ -187,7 +187,7 @@ def list_secrets( """ @overload - def list_secrets( + def _list_secrets( self, connection_name_in_url: str, *, @@ -224,7 +224,7 @@ def list_secrets( """ @overload - def list_secrets( + def _list_secrets( self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). 
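The hunks above and below apply the same rename across the async and sync clients: the REST-generated `list` and `list_secrets` operations become private `_list` and `_list_secrets`, so the hand-written `EndpointsOperations` wrapper in `operations/_patch.py` (whose existing `self._list()` / `self._list_secrets(...)` calls now resolve to them) is the only public surface. A rough sketch of the resulting call pattern, assuming an `AzureAIClient` named `ai_client` constructed as in the sample:

# Sketch: the public wrappers delegate to the now-private generated operations.
endpoints = ai_client.endpoints.list(populate_secrets=False)  # wraps _list()
endpoint = ai_client.endpoints.get(                           # wraps _list_secrets(...)
    connection_name="my-connection",  # placeholder name
    populate_secrets=True,
)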
@@ -243,7 +243,7 @@ def list_secrets( """ @distributed_trace - def list_secrets( + def _list_secrets( self, connection_name_in_url: str, body: Union[JSON, IO[bytes]] = _Unset, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index c8d361f1024f..f12328b0bf99 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -8,11 +8,11 @@ """ from typing import List, Iterable #from zoneinfo import ZoneInfo -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated +from ._operations import EndpointsOperations as EndpointsOperationsGenerated from ..models._enums import AuthenticationType, ConnectionType from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse -class ConnectionsOperations(ConnectionsOperationsGenerated): +class EndpointsOperations(EndpointsOperationsGenerated): def get( self, @@ -81,7 +81,7 @@ def list( __all__: List[str] = [ - "ConnectionsOperations" + "EndpointsOperations" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index f710aade9344..ab7b51b27da5 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -21,15 +21,19 @@ logging_enable=True, ) +""" # You can list all connections, with or without their credentials endpoints = ai_client.endpoints.list( connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. populate_secrets=True # Optional. Defaults to "False" ) +exit() +""" + # Or you can get properties of a single connection endpoint = ai_client.endpoints.get( - connection_name=os.environ["AI_STUDIO_CONNECTION_2"], + connection_name=os.environ["AI_STUDIO_CONNECTION_1"], populate_secrets=True ) From 7f17e494e721ee68c736929431ff751368db7548 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 25 Sep 2024 22:31:37 -0700 Subject: [PATCH 003/138] Create EndpointProperties class. 
Implement client.endpoints.get_default --- .../azure/ai/client/models/__init__.py | 4 +- .../azure/ai/client/models/_enums.py | 4 +- .../azure/ai/client/models/_models.py | 19 ++-- .../azure/ai/client/models/_patch.py | 38 ++++++- .../azure/ai/client/operations/_patch.py | 77 +++++++------ .../samples/endpoints/sample_endpoints.py | 101 ++++++++---------- 6 files changed, 137 insertions(+), 106 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index 9c5a659cca98..dbc55e6f4a35 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -16,7 +16,7 @@ from ._models import CredentialsSASAuth from ._enums import AuthenticationType -from ._enums import ConnectionType +from ._enums import EndpointType from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -31,7 +31,7 @@ "CredentialsApiKeyAuth", "CredentialsSASAuth", "AuthenticationType", - "ConnectionType", + "EndpointType", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index 0ba289a9f795..9c4b6748d098 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -21,10 +21,12 @@ class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Shared Access Signature (SAS) authentication""" -class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The Type (or category) of the connection.""" AZURE_OPEN_AI = "AzureOpenAI" """Azure OpenAI""" SERVERLESS = "Serverless" """Serverless API""" + AGENTS = "Agents" + """Agents""" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 224cf16d8e16..881e4f32c105 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -17,7 +17,6 @@ # pylint: disable=unused-import,ungrouped-imports from .. import models as _models - class ConnectionProperties(_model_base.Model): """to do. @@ -63,14 +62,14 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): :vartype auth_type: str or ~azure.ai.client.models.AAD :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". - :vartype category: str or ~azure.ai.client.models.ConnectionType + :vartype category: str or ~azure.ai.client.models.EndpointType :ivar target: to do. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() + category: Union[str, "_models.EndpointType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" target: str = rest_field() """to do. 
Required.""" @@ -79,7 +78,7 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): def __init__( self, *, - category: Union[str, "_models.ConnectionType"], + category: Union[str, "_models.EndpointType"], target: str, ): ... @@ -102,7 +101,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey :vartype auth_type: str or ~azure.ai.client.models.API_KEY :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". - :vartype category: str or ~azure.ai.client.models.ConnectionType + :vartype category: str or ~azure.ai.client.models.EndpointType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models.CredentialsApiKeyAuth :ivar target: to do. Required. @@ -111,7 +110,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() + category: Union[str, "_models.EndpointType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" @@ -122,7 +121,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey def __init__( self, *, - category: Union[str, "_models.ConnectionType"], + category: Union[str, "_models.EndpointType"], credentials: "_models.CredentialsApiKeyAuth", target: str, ): ... @@ -147,7 +146,7 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): :vartype auth_type: str or ~azure.ai.client.models.SAS :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". - :vartype category: str or ~azure.ai.client.models.ConnectionType + :vartype category: str or ~azure.ai.client.models.EndpointType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models.CredentialsSASAuth :ivar target: to do. Required. @@ -157,7 +156,7 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() + category: Union[str, "_models.EndpointType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" @@ -168,7 +167,7 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): def __init__( self, *, - category: Union[str, "_models.ConnectionType"], + category: Union[str, "_models.EndpointType"], credentials: "_models.CredentialsSASAuth", target: str, ): ... 
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index f7dd32510333..1ea7aa5f9e0c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -6,11 +6,47 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import datetime from typing import List +from azure.core.credentials import TokenCredential +from ._models import ConnectionsListSecretsResponse -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +class EndpointProperties: + + def __init__( + self, + *, + connection: ConnectionsListSecretsResponse, + token_credential: TokenCredential = None + ) -> None: + self.name=connection.name + self.authentication_type=connection.properties.auth_type + self.endpoint_type=connection.properties.category + self.endpoint_url=(connection.properties.target[:-1] if connection.properties.target.endswith("/") else connection.properties.target) + self.key: str = None + if hasattr(connection.properties, 'credentials'): + if hasattr(connection.properties.credentials, 'key'): + self.key=connection.properties.credentials.key + self.token_credential=token_credential + + def __str__(self): + out = "{\n" + out += f" \"name\": \"{self.name}\",\n" + out += f" \"authentication_type\": \"{self.authentication_type}\",\n" + out += f" \"endpoint_type\": \"{self.endpoint_type}\",\n" + out += f" \"endpoint_url\": \"{self.endpoint_url}\",\n" + out += f" \"key\": \"{self.key}\",\n" + if self.token_credential: + access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") + out += f" \"token_credential\": \"{access_token.token}\", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n" + else: + out += f" \"token_credential\": \"null\"\n" + out += "}\n" + return out +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + def patch_sdk(): """Do not remove from this file. 
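A short sketch of how the `EndpointProperties` wrapper above is intended to be consumed; here `connection` stands for a `ConnectionsListSecretsResponse` returned by the service, and the attribute names follow the class definition:

# Sketch: wrap a raw connection response and read the flattened attributes.
props = EndpointProperties(connection=connection)  # token_credential defaults to None
print(props.name, props.endpoint_type, props.authentication_type)
print(props.endpoint_url)  # the connection target with any trailing "/" stripped
if props.key:              # populated only when the connection carries an API key
    print("connection has an API key")
print(props)               # __str__ renders a JSON-like summary, including a token if a credential is set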
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index f12328b0bf99..e78a8122c64f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -9,75 +9,84 @@ from typing import List, Iterable #from zoneinfo import ZoneInfo from ._operations import EndpointsOperations as EndpointsOperationsGenerated -from ..models._enums import AuthenticationType, ConnectionType +from ..models._enums import AuthenticationType, EndpointType from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from ..models._patch import EndpointProperties class EndpointsOperations(EndpointsOperationsGenerated): + def get_default( + self, + *, + endpoint_type: EndpointType | None = None, + populate_secrets: bool = False + ) -> EndpointProperties: + endpoint_properties_list = self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets) + # Since there is no notion of service default at the moment, always return the first one + if len(endpoint_properties_list) > 0: + return endpoint_properties_list[0] + else: + return None + + def get( self, *, - connection_name: str, + endpoint_name: str, populate_secrets: bool = False ) -> ConnectionsListSecretsResponse: - if not connection_name: - raise ValueError("connection_name cannot be empty") + if not endpoint_name: + raise ValueError("Endpoint name cannot be empty") if populate_secrets: connection: ConnectionsListSecretsResponse = self._list_secrets( - connection_name_in_url=connection_name, - connection_name=connection_name, + connection_name_in_url=endpoint_name, + connection_name=endpoint_name, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, workspace_name=self._config.workspace_name, api_version_in_body=self._config.api_version, ) if connection.properties.auth_type == AuthenticationType.AAD: - connection.properties.token_credential = self._config.credential - return connection + return EndpointProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from .._patch import SASTokenCredential - connection.properties.token_credentials = SASTokenCredential( + token_credential = SASTokenCredential( sas_token=connection.properties.credentials.sas, credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, workspace_name=self._config.workspace_name, - connection_name=connection_name) - return connection + connection_name=endpoint_name) + return EndpointProperties(connection=connection, token_credential=token_credential) - return connection + return EndpointProperties(connection=connection) else: internal_response: ConnectionsListResponse = self._list() for connection in internal_response.value: - if connection_name == connection.name: - return connection + if endpoint_name == connection.name: + return EndpointProperties(connection=connection) return None def list( self, *, - connection_type: ConnectionType | None = None, + endpoint_type: EndpointType | None = None, populate_secrets: bool = False - ) -> Iterable[ConnectionsListSecretsResponse]: - # First make a REST call to /list to get all the connections - internal_response: ConnectionsListResponse = self._list() - filtered_connections: List[ConnectionsListSecretsResponse] = [] + ) -> Iterable[EndpointProperties]: + + # 
First make a REST call to /list to get all the connections, without secrets + connections_list: ConnectionsListResponse = self._list() + endpoint_properties_list: List[EndpointProperties] = [] + # Filter by connection type - for connection in internal_response.value: - if connection_type is None or connection.properties.category == connection_type: - filtered_connections.append(connection) - if not populate_secrets: - # If no secrets are needed, we are done. Return filtered list. - return filtered_connections - else: - # If secrets are needed, for each connection in the list, we now - # need to make a /listSecrets rest call to get the connection with secrets - filtered_connections_with_secrets: List[ConnectionsListSecretsResponse] = [] - for connection in filtered_connections: - filtered_connections_with_secrets.append( - self.get(connection_name=connection.name, populate_secrets=True) - ) - return filtered_connections_with_secrets + for connection in connections_list.value: + if endpoint_type is None or connection.properties.category == endpoint_type: + if not populate_secrets: + endpoint_properties_list.append(EndpointProperties(connection=connection)) + else: + endpoint_properties_list.append(self.get(endpoint_name=connection.name, populate_secrets=True)) + + return endpoint_properties_list __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index ab7b51b27da5..82ab7f057378 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,96 +1,81 @@ +""" +# These are needed for SDK logging. You can ignore them. import sys import logging logger = logging.getLogger("azure") logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) +# End of logging setup +""" import os from azure.ai.client import AzureAIClient -from azure.ai.client.models import ConnectionType, AuthenticationType +from azure.ai.client.models import EndpointType, AuthenticationType from openai import AzureOpenAI from azure.ai.inference import ChatCompletionsClient from azure.ai.inference.models import UserMessage from azure.identity import DefaultAzureCredential, get_bearer_token_provider from azure.core.credentials import AzureKeyCredential +# Create an Azure AI Client from individual ai_client = AzureAIClient( credential=DefaultAzureCredential(), subscription_id=os.environ["AZURE_SUBSCRIPTION"], resource_group_name=os.environ["AZURE_RESOURCE_GROUP"], workspace_name=os.environ["AI_STUDIO_HUB"], - logging_enable=True, + #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) -""" -# You can list all connections, with or without their credentials +# You can list all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( - connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. + endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. populate_secrets=True # Optional. 
Defaults to "False" ) +print("====> Listing all Azure Open AI endpoints:") +for endpoint in endpoints: + print(endpoint) + +# You can get the default endpoint of a particular "type" (note that since at the moment the service +# does not have a notion of a default endpoint, this will return the first endpoint of that type): +endpoint = ai_client.endpoints.get_default( + endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True # Optional. Defaults to "False" +) +print("====> Get default Azure Open AI endpoint:") +print(endpoint) -exit() -""" - -# Or you can get properties of a single connection +# You can get an endpoint by its name: endpoint = ai_client.endpoints.get( - connection_name=os.environ["AI_STUDIO_CONNECTION_1"], + endpoint_name=os.environ["AI_STUDIO_CONNECTION_1"], populate_secrets=True ) - -""" -# Remove trailing slash from the endpoint if exist -connection.properties.target = ( - connection.properties.target[:-1] if connection.properties.target.endswith("/") else connection.properties.target -) -print(json.dumps(connection.as_dict(), indent=2)) - - -print(connection) -print(json.dumps(connection.as_dict(), indent=2)) -exit() - -connection = ai_client.connections.get( - connection_name=os.environ["AI_STUDIO_CONNECTION_3"], - populate_secrets=False -) -print(connection) -print(json.dumps(connection.as_dict(), indent=2)) - - -connections = ai_client.connections.list( - connection_type=ConnectionType.AZURE_OPEN_AI, # Optional - populate_secrets=True # Optional. Defaults to "False" -) -for connection in connections: - print(json.dumps(connection.as_dict(), indent=2)) - -exit() -""" +print("====> Print properties of a particular endpoint:") +print(endpoint) # Here is how you would create the appropriate AOAI or Inference SDK for these endpoint -if endpoint.properties.category == ConnectionType.AZURE_OPEN_AI: +if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: - if endpoint.properties.auth_type == AuthenticationType.API_KEY: + if endpoint.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") client = AzureOpenAI( - api_key=endpoint.properties.credentials.key, - azure_endpoint=endpoint.properties.target, + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", # TODO: Is this needed? ) - elif endpoint.properties.auth_type == AuthenticationType.AAD: + elif endpoint.authentication_type == AuthenticationType.AAD: print("====> Creating AzureOpenAI client using Entra ID authentication") client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider(endpoint.properties.token_credential, "https://cognitiveservices.azure.com/.default"), - azure_endpoint=endpoint.properties.target, + azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) - elif endpoint.properties.auth_type == AuthenticationType.SAS: + elif endpoint.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. 
print("====> Creating AzureOpenAI client using SAS authentication") client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider(endpoint.properties.token_credential, "https://cognitiveservices.azure.com/.default"), - azure_endpoint=endpoint.properties.target, + azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) @@ -106,27 +91,27 @@ print(response.choices[0].message.content) -elif endpoint.properties.category == ConnectionType.SERVERLESS: +elif endpoint.endpoint_type == EndpointType.SERVERLESS: - if endpoint.properties.auth_type == AuthenticationType.API_KEY: + if endpoint.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") client = ChatCompletionsClient( - endpoint=endpoint.properties.target, + endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.properties.credentials.key) ) - elif endpoint.properties.auth_type == AuthenticationType.AAD: + elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") client = ChatCompletionsClient( - endpoint=endpoint.properties.target, + endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential ) - elif endpoint.properties.auth_type == AuthenticationType.SAS: + elif endpoint.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. print("====> Creating ChatCompletionsClient using SAS authentication") client = ChatCompletionsClient( - endpoint=endpoint.properties.target, - credential=endpoint.properties.token_credential + endpoint=endpoint.endpoint_url, + credential=endpoint.token_credential ) response = client.complete( From f41ae25c7c209fc25c0467a11c81795289e64ec3 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 25 Sep 2024 23:12:31 -0700 Subject: [PATCH 004/138] Support AzureAIClient.from_connection_string() --- .../azure-ai-client/azure/ai/client/_patch.py | 17 ++++++++++++++++- .../samples/endpoints/sample_endpoints.py | 13 ++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index d3abe1b99b97..e92a572ecf5f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -18,7 +18,22 @@ # This is only done to rename the client. Can we do this in TypeSpec? 
 class AzureAIClient(ClientGenerated):
-    pass
+    @classmethod
+    def from_connection_string(
+        cls,
+        connection: str,
+        credential: "TokenCredential",
+        **kwargs
+    ) -> "AzureAIClient":
+        if not connection:
+            raise ValueError("Connection string is required")
+        parts = connection.split(";")
+        if len(parts) != 3:
+            raise ValueError("Invalid connection string format")
+        workspace_name = parts[0]
+        resource_group_name = parts[1]
+        subscription_id = parts[2]
+        return cls(subscription_id, resource_group_name, workspace_name, credential, **kwargs)
 
 class SASTokenCredential(TokenCredential):
     def __init__(
diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
index 82ab7f057378..366b929ffb3b 100644
--- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
+++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
@@ -1,3 +1,4 @@
+
 """
 # These are needed for SDK logging. You can ignore them.
 import sys
@@ -17,7 +18,16 @@
 from azure.identity import DefaultAzureCredential, get_bearer_token_provider
 from azure.core.credentials import AzureKeyCredential
 
-# Create an Azure AI Client from individual
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<workspace_name>;<resource_group_name>;<subscription_id>"
+ai_client = AzureAIClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    connection=os.environ["AI_STUDIO_PROJECT_CONNECTION_STRING"],
+    #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging
+)
+
+# Or, you can create the Azure AI Client by giving all required parameters directly
 ai_client = AzureAIClient(
     credential=DefaultAzureCredential(),
     subscription_id=os.environ["AZURE_SUBSCRIPTION"],
@@ -26,6 +36,7 @@
     #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging
 )
 
+
 # You can list all endpoints of a particular "type", with or without their credentials:
 endpoints = ai_client.endpoints.list(
     endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types.
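To tie commit 004 together, here is a minimal sketch of the connection-string flow it introduces (all values below are placeholders, and note that commit 006 later changes the format to four parts beginning with the host name):

# Minimal sketch of the three-part connection string parsed above; values are
# placeholders, not real Azure resources.
from azure.identity import DefaultAzureCredential
from azure.ai.client import AzureAIClient

conn_str = "my-workspace;my-resource-group;00000000-0000-0000-0000-000000000000"
client = AzureAIClient.from_connection_string(
    connection=conn_str,  # parsed as <workspace>;<resource group>;<subscription>
    credential=DefaultAzureCredential(),
)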
From ee2aad64d401da0ca5665822879747f273dedd53 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 25 Sep 2024 23:37:40 -0700 Subject: [PATCH 005/138] Make default input argument for .endpoints.get --- sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py | 4 +++- sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index e78a8122c64f..5b7b306e4e12 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -18,9 +18,11 @@ class EndpointsOperations(EndpointsOperationsGenerated): def get_default( self, *, - endpoint_type: EndpointType | None = None, + endpoint_type: EndpointType, populate_secrets: bool = False ) -> EndpointProperties: + if not endpoint_type: + raise ValueError("You must specify an endpoint type") endpoint_properties_list = self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets) # Since there is no notion of service default at the moment, always return the first one if len(endpoint_properties_list) > 0: diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index 366b929ffb3b..fe28866100cd 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -49,7 +49,7 @@ # You can get the default endpoint of a particular "type" (note that since at the moment the service # does not have a notion of a default endpoint, this will return the first endpoint of that type): endpoint = ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. + endpoint_type=EndpointType.AZURE_OPEN_AI, # Required. populate_secrets=True # Optional. Defaults to "False" ) print("====> Get default Azure Open AI endpoint:") @@ -57,7 +57,7 @@ # You can get an endpoint by its name: endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_STUDIO_CONNECTION_1"], + endpoint_name=os.environ["AI_STUDIO_CONNECTION_1"], # Required. populate_secrets=True ) print("====> Print properties of a particular endpoint:") From 1619243bb16fa6ec51676a48d8a1f210ab53ff5a Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:46:06 -0700 Subject: [PATCH 006/138] Re-emit from latest TypeSpec. 
First attempt to support 3 different endpoints --- .../azure/ai/client/_client.py | 24 +- .../azure/ai/client/_configuration.py | 15 +- .../azure-ai-client/azure/ai/client/_patch.py | 181 ++++- .../azure/ai/client/aio/_client.py | 24 +- .../azure/ai/client/aio/_configuration.py | 15 +- .../ai/client/aio/operations/__init__.py | 4 +- .../ai/client/aio/operations/_operations.py | 559 +++++++++++++-- .../azure/ai/client/models/__init__.py | 29 +- .../azure/ai/client/models/_enums.py | 4 +- .../azure/ai/client/models/_models.py | 382 +++++++--- .../azure/ai/client/models/_patch.py | 43 +- .../azure/ai/client/operations/__init__.py | 4 +- .../azure/ai/client/operations/_operations.py | 654 ++++++++++++++++-- .../azure/ai/client/operations/_patch.py | 28 +- .../test_evaluations_operations.py | 71 ++ .../test_evaluations_operations_async.py | 72 ++ sdk/ai/azure-ai-client/requirements.txt | 0 .../samples/endpoints/sample_endpoints.py | 22 +- .../tests/endpoints/unit_tests.py | 42 +- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 20 files changed, 1771 insertions(+), 404 deletions(-) create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py create mode 100644 sdk/ai/azure-ai-client/requirements.txt diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index 9e3bfdf8d053..5406c3e3bb6d 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -16,7 +16,7 @@ from ._configuration import ClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AssistantsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -28,15 +28,20 @@ class Client: # pylint: disable=client-accepts-api-version-keyword :ivar endpoints: EndpointsOperations operations :vartype endpoints: azure.ai.client.operations.EndpointsOperations - :ivar assistants: AssistantsOperations operations - :vartype assistants: azure.ai.client.operations.AssistantsOperations + :ivar agents: AgentsOperations operations + :vartype agents: azure.ai.client.operations.AgentsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.operations.EvaluationsOperations - :param subscription_id: The ID of the target subscription. Required. + :param host_name: The Azure AI Studio project host name, in the format + ``.api.azureml.ms`` or ``..api.azureml.ms``\\\\ + , where :code:`` is the Azure region where the project is deployed (e.g. westus) + and :code:`` is the GUID of the Enterprise private link. Required. + :type host_name: str + :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str - :param resource_group_name: The name of the Resource Group. Required. + :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :param workspace_name: The name of the Azure AI Studio hub. Required. :type workspace_name: str :param credential: Credential used to authenticate requests to the service. Required. 
:type credential: ~azure.core.credentials.TokenCredential @@ -48,14 +53,16 @@ class Client: # pylint: disable=client-accepts-api-version-keyword def __init__( self, + host_name: str, subscription_id: str, resource_group_name: str, workspace_name: str, credential: "TokenCredential", **kwargs: Any ) -> None: - _endpoint = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + _endpoint = "https://{hostName}/{subscriptionId}/{resourceGroupName}/{workspaceName}" self._config = ClientConfiguration( + host_name=host_name, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, @@ -85,7 +92,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) - self.assistants = AssistantsOperations(self._client, self._config, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: @@ -108,6 +115,7 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py index de9b78e5df36..0be11b6382d1 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -23,11 +23,16 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param subscription_id: The ID of the target subscription. Required. + :param host_name: The Azure AI Studio project host name, in the format + ``.api.azureml.ms`` or ``..api.azureml.ms``\\ , + where :code:`` is the Azure region where the project is deployed (e.g. westus) + and :code:`` is the GUID of the Enterprise private link. Required. + :type host_name: str + :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str - :param resource_group_name: The name of the Resource Group. Required. + :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :param workspace_name: The name of the Azure AI Studio hub. Required. :type workspace_name: str :param credential: Credential used to authenticate requests to the service. Required. 
:type credential: ~azure.core.credentials.TokenCredential @@ -39,6 +44,7 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes def __init__( self, + host_name: str, subscription_id: str, resource_group_name: str, workspace_name: str, @@ -47,6 +53,8 @@ def __init__( ) -> None: api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + if host_name is None: + raise ValueError("Parameter 'host_name' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: @@ -56,6 +64,7 @@ def __init__( if credential is None: raise ValueError("Parameter 'credential' must not be None.") + self.host_name = host_name self.subscription_id = subscription_id self.resource_group_name = resource_group_name self.workspace_name = workspace_name diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index e92a572ecf5f..49799ba2d1c0 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -10,42 +10,164 @@ import logging import base64 import json -from typing import List, Tuple, Union +from typing import List, Tuple, Union, Any from azure.core.credentials import TokenCredential, AccessToken +from azure.core import PipelineClient +from azure.core.pipeline import policies +from ._configuration import ClientConfiguration +from ._serialization import Deserializer, Serializer +from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations from ._client import Client as ClientGenerated logger = logging.getLogger(__name__) -# This is only done to rename the client. Can we do this in TypeSpec? class AzureAIClient(ClientGenerated): + + def __init__( + self, + host_name: str, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + kwargs1 = kwargs.copy() + kwargs2 = kwargs.copy() + kwargs3 = kwargs.copy() + + # For Endpoints operations (enumerating connections, getting SAS tokens) + _endpoint1 = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + self._config1 = ClientConfiguration( + host_name=host_name, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + workspace_name=workspace_name, + credential=credential, + api_version="2024-07-01-preview", + credential_scopes="https://management.azure.com/.default", + **kwargs1 + ) + _policies1 = kwargs1.pop("policies", None) + if _policies1 is None: + _policies1 = [ + policies.RequestIdPolicy(**kwargs1), + self._config1.headers_policy, + self._config1.user_agent_policy, + self._config1.proxy_policy, + policies.ContentDecodePolicy(**kwargs1), + self._config1.redirect_policy, + self._config1.retry_policy, + self._config1.authentication_policy, + self._config1.custom_hook_policy, + self._config1.logging_policy, + policies.DistributedTracingPolicy(**kwargs1), + policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, + self._config1.http_logging_policy, + ] + self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + + # For Agents operations + _endpoint2 = 
"https://{host_hame}/assistants/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + self._config2 = ClientConfiguration( + host_name=host_name, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + workspace_name=workspace_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes="https://ml.azure.com/.default", + **kwargs2 + ) + _policies2 = kwargs2.pop("policies", None) + if _policies2 is None: + _policies2 = [ + policies.RequestIdPolicy(**kwargs2), + self._config2.headers_policy, + self._config2.user_agent_policy, + self._config2.proxy_policy, + policies.ContentDecodePolicy(**kwargs2), + self._config2.redirect_policy, + self._config2.retry_policy, + self._config2.authentication_policy, + self._config2.custom_hook_policy, + self._config2.logging_policy, + policies.DistributedTracingPolicy(**kwargs2), + policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, + self._config2.http_logging_policy, + ] + self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + + # For Cloud Evaluations operations + _endpoint3 = "https://{host_name}/raisvc/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + self._config3 = ClientConfiguration( + host_name=host_name, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + workspace_name=workspace_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes="https://ml.azure.com/.default", + **kwargs3 + ) + _policies3 = kwargs3.pop("policies", None) + if _policies3 is None: + _policies3 = [ + policies.RequestIdPolicy(**kwargs3), + self._config3.headers_policy, + self._config3.user_agent_policy, + self._config3.proxy_policy, + policies.ContentDecodePolicy(**kwargs3), + self._config3.redirect_policy, + self._config3.retry_policy, + self._config3.authentication_policy, + self._config3.custom_hook_policy, + self._config3.logging_policy, + policies.DistributedTracingPolicy(**kwargs3), + policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, + self._config3.http_logging_policy, + ] + self._client3: PipelineClient = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + self.endpoints = EndpointsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) + + @classmethod - def from_connection_string( - cls, - connection: str, - credential: "TokenCredential", - **kwargs - ) -> "AzureAIClient": + def from_connection_string(cls, connection: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": + """ + Create an AzureAIClient from a connection string. + + :param connection: The connection string, copied from your AI Studio project. 
+ """ if not connection: raise ValueError("Connection string is required") parts = connection.split(";") - if len(parts) != 3: + if len(parts) != 4: raise ValueError("Invalid connection string format") - workspace_name = parts[0] - resource_group_name = parts[1] - subscription_id = parts[2] - return cls(subscription_id, resource_group_name, workspace_name, credential, **kwargs) + host_name = parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + workspace_name = parts[3] + return cls(host_name, subscription_id, resource_group_name, workspace_name, credential, **kwargs) + class SASTokenCredential(TokenCredential): def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - workspace_name: str, - connection_name: str - ): + self, + *, + sas_token: str, + credential: TokenCredential, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + connection_name: str + ): self._sas_token = sas_token self._credential = credential self._subscription_id = subscription_id @@ -57,12 +179,12 @@ def __init__( @classmethod def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: - payload = jwt_token.split('.')[1] - padded_payload = payload + '=' * (4 - len(payload) % 4) # Add padding if necessary + payload = jwt_token.split(".")[1] + padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode('utf-8') + decoded_str = decoded_bytes.decode("utf-8") decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get('exp') + expiration_date = decoded_payload.get("exp") return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) def _refresh_token(self) -> None: @@ -74,10 +196,7 @@ def _refresh_token(self) -> None: workspace_name=self._workspace_name, ) - connection = ai_client.connections.get( - connection_name=self._connection_name, - populate_secrets=True - ) + connection = ai_client.connections.get(connection_name=self._connection_name, populate_secrets=True) self._sas_token = connection.properties.credentials.sas self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) @@ -92,7 +211,7 @@ def get_token(self) -> AccessToken: __all__: List[str] = [ "AzureAIClient", - "SASTokenCredential" + "SASTokenCredential", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 1548ec7b0de3..9a3b6a454a59 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -16,7 +16,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import ClientConfiguration -from .operations import AssistantsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports @@ -28,15 +28,20 @@ class Client: # pylint: disable=client-accepts-api-version-keyword :ivar endpoints: EndpointsOperations operations :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations - :ivar assistants: AssistantsOperations operations - :vartype assistants: azure.ai.client.aio.operations.AssistantsOperations + :ivar agents: AgentsOperations operations + :vartype agents: 
azure.ai.client.aio.operations.AgentsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations - :param subscription_id: The ID of the target subscription. Required. + :param host_name: The Azure AI Studio project host name, in the format + ``.api.azureml.ms`` or ``..api.azureml.ms``\\\\ + , where :code:`` is the Azure region where the project is deployed (e.g. westus) + and :code:`` is the GUID of the Enterprise private link. Required. + :type host_name: str + :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str - :param resource_group_name: The name of the Resource Group. Required. + :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :param workspace_name: The name of the Azure AI Studio hub. Required. :type workspace_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential @@ -48,14 +53,16 @@ class Client: # pylint: disable=client-accepts-api-version-keyword def __init__( self, + host_name: str, subscription_id: str, resource_group_name: str, workspace_name: str, credential: "AsyncTokenCredential", **kwargs: Any ) -> None: - _endpoint = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + _endpoint = "https://{hostName}/{subscriptionId}/{resourceGroupName}/{workspaceName}" self._config = ClientConfiguration( + host_name=host_name, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, @@ -85,7 +92,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) - self.assistants = AssistantsOperations(self._client, self._config, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( @@ -110,6 +117,7 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py index de195b03fcda..6b0116f55f8d 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -23,11 +23,16 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param subscription_id: The ID of the target subscription. Required. 
+ :param host_name: The Azure AI Studio project host name, in the format + ``.api.azureml.ms`` or ``..api.azureml.ms``\\ , + where :code:`` is the Azure region where the project is deployed (e.g. westus) + and :code:`` is the GUID of the Enterprise private link. Required. + :type host_name: str + :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str - :param resource_group_name: The name of the Resource Group. Required. + :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the workspace (Azure AI Studio hub). Required. + :param workspace_name: The name of the Azure AI Studio hub. Required. :type workspace_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential @@ -39,6 +44,7 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes def __init__( self, + host_name: str, subscription_id: str, resource_group_name: str, workspace_name: str, @@ -47,6 +53,8 @@ def __init__( ) -> None: api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + if host_name is None: + raise ValueError("Parameter 'host_name' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: @@ -56,6 +64,7 @@ def __init__( if credential is None: raise ValueError("Parameter 'credential' must not be None.") + self.host_name = host_name self.subscription_id = subscription_id self.resource_group_name = resource_group_name self.workspace_name = workspace_name diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py index 6e2dd3e8d726..4c4fe956f18f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from ._operations import EndpointsOperations -from ._operations import AssistantsOperations +from ._operations import AgentsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -16,7 +16,7 @@ __all__ = [ "EndpointsOperations", - "AssistantsOperations", + "AgentsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index abf0d2f6ac35..67ddf7d3bfa3 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -9,8 +9,10 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, Type, TypeVar, Union, overload +import urllib.parse +from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -23,12 +25,20 @@ ) from azure.core.pipeline import PipelineResponse from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async 
from azure.core.utils import case_insensitive_dict from ... import models as _models from ..._model_base import SdkJSONEncoder, _deserialize -from ...operations._operations import build_endpoints_list_request, build_endpoints_list_secrets_request +from ...operations._operations import ( + build_endpoints_list_request, + build_endpoints_list_secrets_request, + build_evaluations_evaluations_create_request, + build_evaluations_evaluations_get_request, + build_evaluations_evaluations_list_request, + build_evaluations_evaluations_update_request, +) if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -58,11 +68,11 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # pylint: disable=protected-access """List the details of all the connections (not including their credentials). :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListResponse + :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object @@ -76,7 +86,9 @@ async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ConnectionsListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop( # pylint: disable=protected-access + "cls", None + ) _request = build_endpoints_list_request( api_version=self._config.api_version, @@ -84,6 +96,7 @@ async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: params=_params, ) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -111,7 +124,9 @@ async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ConnectionsListResponse, response.json()) + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -119,26 +134,11 @@ async def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: return deserialized # type: ignore @overload - async def _list_secrets( + async def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - + ) -> _models._models.ConnectionsListSecretsResponse: ... @overload - async def _list_secrets( + async def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, *, @@ -149,52 +149,14 @@ async def _list_secrets( api_version_in_body: str, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :keyword connection_name: Connection Name (should be the same as the connection name in the URL - path). Required. - :paramtype connection_name: str - :keyword subscription_id: The ID of the target subscription. Required. - :paramtype subscription_id: str - :keyword resource_group_name: The name of the Resource Group. Required. - :paramtype resource_group_name: str - :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. - :paramtype workspace_name: str - :keyword api_version_in_body: The api version. Required. - :paramtype api_version_in_body: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - + ) -> _models._models.ConnectionsListSecretsResponse: ... @overload - async def _list_secrets( + async def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + ) -> _models._models.ConnectionsListSecretsResponse: ... @distributed_trace_async - async def _list_secrets( + async def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -205,7 +167,7 @@ async def _list_secrets( workspace_name: str = _Unset, api_version_in_body: str = _Unset, **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: + ) -> _models._models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). :param connection_name_in_url: Connection Name. Required. @@ -225,7 +187,7 @@ async def _list_secrets( :paramtype api_version_in_body: str :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object @@ -240,7 +202,9 @@ async def _list_secrets( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop( # pylint: disable=protected-access + "cls", None + ) if body is _Unset: if connection_name is _Unset: @@ -277,6 +241,7 @@ async def _list_secrets( params=_params, ) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -304,7 +269,9 @@ async def _list_secrets( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ConnectionsListSecretsResponse, response.json()) + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -312,14 +279,14 @@ async def _list_secrets( return deserialized # type: ignore -class AssistantsOperations: +class AgentsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.client.aio.Client`'s - :attr:`assistants` attribute. + :attr:`agents` attribute. """ def __init__(self, *args, **kwargs) -> None: @@ -346,3 +313,451 @@ def __init__(self, *args, **kwargs) -> None: self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + self.evaluations = EvaluationsEvaluationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + +class EvaluationsEvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.aio.Client`'s + :attr:`evaluations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Get an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, body: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. + :type body: ~azure.ai.client.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. 
+ :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type body: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_evaluations_create_request( + api_version=self._config.api_version, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.Evaluation"]: + """List evaluations. + + :keyword top: The number of result items to return. Default value is None. 
+ :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_evaluations_list_request( + api_version=self._config.api_version, + top=top, + skip=skip, + maxpagesize=maxpagesize, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) 
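[Editor's note — usage sketch, not part of the patch.] The async `list` above is deliberately not a coroutine: it returns an AsyncItemPaged immediately, and the paged GET requests (including nextLink follow-ups) are issued lazily as the caller iterates. The sketch below shows how the evaluations operations compose from the async client. Only the `evaluations` attribute and the operation signatures are confirmed by the generated docstrings; the client constructor arguments are assumptions inferred from the `self._config` fields (host_name, subscription_id, resource_group_name, workspace_name).

    # --- usage sketch (editorial); constructor arguments are assumed, not confirmed ---
    import asyncio

    from azure.ai.client.aio import Client  # async client from the aio package added by this patch
    from azure.ai.client.models import Dataset, Evaluation, EvaluatorConfiguration
    from azure.identity.aio import DefaultAzureCredential

    async def main() -> None:
        client = Client(
            host_name="<host-name>",              # assumed to mirror self._config.host_name etc.
            subscription_id="<subscription-id>",
            resource_group_name="<resource-group>",
            workspace_name="<workspace-name>",
            credential=DefaultAzureCredential(),
        )
        # create() accepts a typed Evaluation, a JSON dict, or IO[bytes]; the typed
        # form is serialized with SdkJSONEncoder, excluding read-only fields.
        created = await client.evaluations.create(
            Evaluation(
                data=Dataset(id="<dataset-asset-id>"),
                evaluators={"relevance": EvaluatorConfiguration(id="<evaluator-id>")},
                display_name="sample evaluation",
            )
        )
        print(created.id, created.status)  # populated by the service on the 201 response

        # list() is called without await; iterating drives get_next/extract_data above.
        async for evaluation in client.evaluations.list(top=10):
            print(evaluation.id, evaluation.display_name)

    asyncio.run(main())
    # --- end usage sketch ---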
+ + @overload + async def update( + self, id: str, body: _models.UpdateEvaluationRequest, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: ~azure.ai.client.models.UpdateEvaluationRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update( + self, id: str, body: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Is one of the following types: UpdateEvaluationRequest, + JSON, IO[bytes] Required. + :type body: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_evaluations_update_request( + id=id, + api_version=self._config.api_version, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index dbc55e6f4a35..e1e2d0284047 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -6,15 +6,14 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._models import ConnectionProperties -from ._models import ConnectionPropertiesAADAuth -from ._models import ConnectionPropertiesApiKeyAuth -from ._models import ConnectionPropertiesSASAuth -from ._models import ConnectionsListResponse -from ._models import ConnectionsListSecretsResponse -from ._models import CredentialsApiKeyAuth +from ._models import AppInsightsConfiguration +from ._models import Dataset +from ._models import Evaluation +from ._models import EvaluatorConfiguration +from ._models import InputData +from ._models import SystemData +from ._models import UpdateEvaluationRequest from ._models import CredentialsSASAuth - from ._enums import AuthenticationType from ._enums import EndpointType from ._patch import __all__ as _patch_all @@ -22,13 +21,13 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "ConnectionProperties", - "ConnectionPropertiesAADAuth", - "ConnectionPropertiesApiKeyAuth", - "ConnectionPropertiesSASAuth", - "ConnectionsListResponse", - "ConnectionsListSecretsResponse", - "CredentialsApiKeyAuth", + "AppInsightsConfiguration", + "Dataset", + "Evaluation", + "EvaluatorConfiguration", + "InputData", + "SystemData", + "UpdateEvaluationRequest", "CredentialsSASAuth", "AuthenticationType", "EndpointType", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index 9c4b6748d098..d8d49016bbef 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -28,5 +28,5 @@ class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Azure OpenAI""" SERVERLESS = "Serverless" """Serverless API""" - AGENTS = "Agents" - """Agents""" + AGENT = "Agent" + """Agent""" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 881e4f32c105..1df6070a7b6e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -7,7 +7,8 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Dict, List, Literal, Mapping, TYPE_CHECKING, Union, overload +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload from .. import _model_base from .._model_base import rest_discriminator, rest_field @@ -17,28 +18,32 @@ # pylint: disable=unused-import,ungrouped-imports from .. import models as _models -class ConnectionProperties(_model_base.Model): - """to do. + +class InputData(_model_base.Model): + """Abstract data class. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth + AppInsightsConfiguration, Dataset - :ivar auth_type: Authentication type of the connection target. Required. Known values are: - "ApiKey", "AAD", and "SAS". - :vartype auth_type: str or ~azure.ai.client.models.AuthenticationType + :ivar type: Discriminator property for InputData. Required. Default value is None. + :vartype type: str + :ivar id: Evaluation input data. Required. 
+    :vartype id: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
-    auth_type: str = rest_discriminator(name="authType")
-    """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\",
-    and \"SAS\"."""
+    type: str = rest_discriminator(name="type")
+    """Discriminator property for InputData. Required. Default value is None."""
+    id: str = rest_field()
+    """Evaluation input data. Required."""

     @overload
     def __init__(
         self,
         *,
-        auth_type: str,
+        type: str,
+        id: str,  # pylint: disable=redefined-builtin
     ): ...

     @overload
@@ -52,34 +57,34 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useles
         super().__init__(*args, **kwargs)


-class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"):
-    """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\
-    ).
+class AppInsightsConfiguration(InputData, discriminator="app_insights"):
+    """Data Source for Application Insights.

-    :ivar auth_type: Authentication type of the connection target. Required. Entra ID
-     authentication
-    :vartype auth_type: str or ~azure.ai.client.models.AAD
-    :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and
-     "Serverless".
-    :vartype category: str or ~azure.ai.client.models.EndpointType
-    :ivar target: to do. Required.
-    :vartype target: str
+    :ivar id: Evaluation input data. Required.
+    :vartype id: str
+    :ivar type: Required. Default value is "app_insights".
+    :vartype type: str
+    :ivar connection_string: Application Insights connection string. Required.
+    :vartype connection_string: str
+    :ivar query: Query to fetch data. Required.
+    :vartype query: str
     """

-    auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType")  # type: ignore
-    """Authentication type of the connection target. Required. Entra ID authentication"""
-    category: Union[str, "_models.EndpointType"] = rest_field()
-    """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\"."""
-    target: str = rest_field()
-    """to do. Required."""
+    type: Literal["app_insights"] = rest_discriminator(name="type")  # type: ignore
+    """Required. Default value is \"app_insights\"."""
+    connection_string: str = rest_field(name="connectionString")
+    """Application Insights connection string. Required."""
+    query: str = rest_field()
+    """Query to fetch data. Required."""

     @overload
     def __init__(
         self,
         *,
-        category: Union[str, "_models.EndpointType"],
-        target: str,
+        id: str,  # pylint: disable=redefined-builtin
+        connection_string: str,
+        query: str,
     ): ...

     @overload
@@ -90,7 +95,49 @@ def __init__(self, mapping: Mapping[str, Any]):
         """

     def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, auth_type=AuthenticationType.AAD, **kwargs)
+        super().__init__(*args, type="app_insights", **kwargs)
+
+
+class ConnectionProperties(_model_base.Model):
+    """to do.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth
+
+
+    :ivar auth_type: Authentication type of the connection target. Required. Known values are:
+     "ApiKey", "AAD", and "SAS".
+    :vartype auth_type: str or ~azure.ai.client.models.AuthenticationType
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    auth_type: str = rest_discriminator(name="authType")
+    """Authentication type of the connection target. Required.
Known values are: \"ApiKey\", \"AAD\", + and \"SAS\".""" + + +class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): + """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ + ). + + + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.client.models.AAD + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", and "Agent". + :vartype category: str or ~azure.ai.client.models.EndpointType + :ivar target: to do. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Entra ID authentication""" + category: Union[str, "_models._enums.EndpointType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and + \"Agent\".""" + target: str = rest_field() + """to do. Required.""" class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): @@ -99,43 +146,25 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey :ivar auth_type: Authentication type of the connection target. Required. API Key authentication :vartype auth_type: str or ~azure.ai.client.models.API_KEY - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and - "Serverless". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", and "Agent". :vartype category: str or ~azure.ai.client.models.EndpointType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.client.models.CredentialsApiKeyAuth + :vartype credentials: ~azure.ai.client.models._models.CredentialsApiKeyAuth :ivar target: to do. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.EndpointType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" - credentials: "_models.CredentialsApiKeyAuth" = rest_field() + category: Union[str, "_models._enums.EndpointType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and + \"Agent\".""" + credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() """to do. Required.""" - @overload - def __init__( - self, - *, - category: Union[str, "_models.EndpointType"], - credentials: "_models.CredentialsApiKeyAuth", - target: str, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, auth_type=AuthenticationType.API_KEY, **kwargs) - class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): """Connection properties for connections with SAS authentication. 
@@ -144,11 +173,11 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"):

     :ivar auth_type: Authentication type of the connection target. Required. Shared Access
      Signature (SAS) authentication
     :vartype auth_type: str or ~azure.ai.client.models.SAS
-    :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and
-     "Serverless".
+    :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI",
+     "Serverless", and "Agent".
     :vartype category: str or ~azure.ai.client.models.EndpointType
     :ivar credentials: Credentials will only be present for authType=SAS. Required.
-    :vartype credentials: ~azure.ai.client.models.CredentialsSASAuth
+    :vartype credentials: ~azure.ai.client.models._models.CredentialsSASAuth
     :ivar target: to do. Required.
     :vartype target: str
     """
@@ -156,49 +185,85 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"):
     auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType")  # type: ignore
     """Authentication type of the connection target. Required. Shared Access Signature (SAS)
     authentication"""
-    category: Union[str, "_models.EndpointType"] = rest_field()
-    """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\"."""
-    credentials: "_models.CredentialsSASAuth" = rest_field()
+    category: Union[str, "_models._enums.EndpointType"] = rest_field()
+    """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and
+     \"Agent\"."""
+    credentials: "_models._models.CredentialsSASAuth" = rest_field()
     """Credentials will only be present for authType=SAS. Required."""
     target: str = rest_field()
     """to do. Required."""

-    @overload
-    def __init__(
-        self,
-        *,
-        category: Union[str, "_models.EndpointType"],
-        credentials: "_models.CredentialsSASAuth",
-        target: str,
-    ): ...
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]):
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
+
+class ConnectionsListResponse(_model_base.Model):
+    """to do.

-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, auth_type=AuthenticationType.SAS, **kwargs)
+    :ivar value: to do. Required.
+    :vartype value: list[~azure.ai.client.models._models.ConnectionsListSecretsResponse]
+    """
+
+    value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field()
+    """to do. Required."""

-class ConnectionsListResponse(_model_base.Model):
+
+class ConnectionsListSecretsResponse(_model_base.Model):
     """to do.

-    :ivar value: to do. Required.
-    :vartype value: list[~azure.ai.client.models.ConnectionsListSecretsResponse]
+    :ivar name: The name of the resource. Required.
+    :vartype name: str
+    :ivar properties: The properties of the resource. Required.
+    :vartype properties: ~azure.ai.client.models._models.ConnectionProperties
     """

-    value: List["_models.ConnectionsListSecretsResponse"] = rest_field()
+    name: str = rest_field()
+    """The name of the resource. Required."""
+    properties: "_models._models.ConnectionProperties" = rest_field()
+    """The properties of the resource. Required."""
+
+
+class CredentialsApiKeyAuth(_model_base.Model):
+    """to do.
+
+
+    :ivar key: to do. Required.
+    :vartype key: str
+    """
+
+    key: str = rest_field()
     """to do. Required."""

+
+class CredentialsSASAuth(_model_base.Model):
+    """to do.
+
+
+    :ivar sas: to do. Required.
+    :vartype sas: str
+    """
+
+    sas: str = rest_field(name="SAS")
+    """to do. Required."""
+
+
+class Dataset(InputData, discriminator="dataset"):
+    """Dataset as source for evaluation.
+
+
+    :ivar id: Evaluation input data. Required.
+    :vartype id: str
+    :ivar type: Required. Default value is "dataset".
+    :vartype type: str
+    """
+
+    type: Literal["dataset"] = rest_discriminator(name="type")  # type: ignore
+    """Required. Default value is \"dataset\"."""
+
     @overload
     def __init__(
         self,
         *,
-        value: List["_models.ConnectionsListSecretsResponse"],
+        id: str,  # pylint: disable=redefined-builtin
     ): ...

     @overload
@@ -209,30 +274,71 @@ def __init__(self, mapping: Mapping[str, Any]):
         """

     def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class ConnectionsListSecretsResponse(_model_base.Model):
-    """to do.
-
-
-    :ivar name: The name of the resource. Required.
-    :vartype name: str
-    :ivar properties: The properties of the resource. Required.
-    :vartype properties: ~azure.ai.client.models.ConnectionProperties
+        super().__init__(*args, type="dataset", **kwargs)
+
+
+class Evaluation(_model_base.Model):
+    """Evaluation Definition.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar id: Identifier of the evaluation.
+    :vartype id: str
+    :ivar data: Data for evaluation. Required.
+    :vartype data: ~azure.ai.client.models.InputData
+    :ivar display_name: Display Name for evaluation. It helps to find the evaluation easily and
+     does not need to be unique.
+    :vartype display_name: str
+    :ivar description: Description of the evaluation. It can be used to store additional
+     information about the evaluation and is mutable.
+    :vartype description: str
+    :ivar system_data: Metadata containing createdBy and modifiedBy information.
+    :vartype system_data: ~azure.ai.client.models.SystemData
+    :ivar status: Status of the evaluation. It is set by service and is read-only.
+    :vartype status: str
+    :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable.
+    :vartype tags: dict[str, str]
+    :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a
+     property cannot be removed.
+    :vartype properties: dict[str, str]
+    :ivar evaluators: Evaluators to be used for the evaluation. Required.
+    :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration]
     """

-    name: str = rest_field()
-    """The name of the resource. Required."""
-    properties: "_models.ConnectionProperties" = rest_field()
-    """The properties of the resource. Required."""
+    id: Optional[str] = rest_field()
+    """Identifier of the evaluation."""
+    data: "_models.InputData" = rest_field()
+    """Data for evaluation. Required."""
+    display_name: Optional[str] = rest_field(name="displayName")
+    """Display Name for evaluation. It helps to find the evaluation easily and does not need to be
+     unique."""
+    description: Optional[str] = rest_field()
+    """Description of the evaluation. It can be used to store additional information about the
+     evaluation and is mutable."""
+    system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"])
+    """Metadata containing createdBy and modifiedBy information."""
+    status: Optional[str] = rest_field(visibility=["read"])
+    """Status of the evaluation. It is set by service and is read-only."""
+    tags: Optional[Dict[str, str]] = rest_field()
+    """Evaluation's tags.
Unlike properties, tags are fully mutable.""" + properties: Optional[Dict[str, str]] = rest_field() + """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be + removed.""" + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field() + """Evaluators to be used for the evaluation. Required.""" @overload def __init__( self, *, - name: str, - properties: "_models.ConnectionProperties", + data: "_models.InputData", + evaluators: Dict[str, "_models.EvaluatorConfiguration"], + id: Optional[str] = None, # pylint: disable=redefined-builtin + display_name: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + properties: Optional[Dict[str, str]] = None, ): ... @overload @@ -246,22 +352,32 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class CredentialsApiKeyAuth(_model_base.Model): - """to do. +class EvaluatorConfiguration(_model_base.Model): + """Evaluator Configuration. - :ivar key: to do. Required. - :vartype key: str + :ivar id: Identifier of the evaluator. Required. + :vartype id: str + :ivar init_params: Initialization parameters of the evaluator. + :vartype init_params: dict[str, any] + :ivar data_mapping: Data parameters of the evaluator. + :vartype data_mapping: dict[str, str] """ - key: str = rest_field() - """to do. Required.""" + id: str = rest_field() + """Identifier of the evaluator. Required.""" + init_params: Optional[Dict[str, Any]] = rest_field(name="initParams") + """Initialization parameters of the evaluator.""" + data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping") + """Data parameters of the evaluator.""" @overload def __init__( self, *, - key: str, + id: str, # pylint: disable=redefined-builtin + init_params: Optional[Dict[str, Any]] = None, + data_mapping: Optional[Dict[str, str]] = None, ): ... @overload @@ -275,22 +391,60 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class CredentialsSASAuth(_model_base.Model): - """to do. +class SystemData(_model_base.Model): + """Metadata pertaining to creation and last modification of the resource. + Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar sas: to do. Required. - :vartype sas: str + :ivar created_at: The timestamp the resource was created at. + :vartype created_at: ~datetime.datetime + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The identity type that created the resource. + :vartype created_by_type: str + :ivar last_modified_at: The timestamp of resource last modification (UTC). + :vartype last_modified_at: ~datetime.datetime """ - sas: str = rest_field(name="SAS") - """to do. 
Required.""" + created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") + """The timestamp the resource was created at.""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The identity that created the resource.""" + created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) + """The identity type that created the resource.""" + last_modified_at: Optional[datetime.datetime] = rest_field( + name="lastModifiedAt", visibility=["read"], format="rfc3339" + ) + """The timestamp of resource last modification (UTC).""" + + +class UpdateEvaluationRequest(_model_base.Model): + """Update Evaluation Request. + + All required parameters must be populated in order to send to server. + + :ivar tags: Tags to be updated. Required. + :vartype tags: dict[str, str] + :ivar display_name: Display Name. Required. + :vartype display_name: str + :ivar description: Description. Required. + :vartype description: str + """ + + tags: Dict[str, str] = rest_field() + """Tags to be updated. Required.""" + display_name: str = rest_field(name="displayName") + """Display Name. Required.""" + description: str = rest_field() + """Description. Required.""" @overload def __init__( self, *, - sas: str, + tags: Dict[str, str], + display_name: str, + description: str, ): ... @overload diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 1ea7aa5f9e0c..47d82daf89b3 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -11,42 +11,43 @@ from azure.core.credentials import TokenCredential from ._models import ConnectionsListSecretsResponse + class EndpointProperties: - def __init__( - self, - *, - connection: ConnectionsListSecretsResponse, - token_credential: TokenCredential = None - ) -> None: - self.name=connection.name - self.authentication_type=connection.properties.auth_type - self.endpoint_type=connection.properties.category - self.endpoint_url=(connection.properties.target[:-1] if connection.properties.target.endswith("/") else connection.properties.target) + def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: + self.name = connection.name + self.authentication_type = connection.properties.auth_type + self.endpoint_type = connection.properties.category + self.endpoint_url = ( + connection.properties.target[:-1] + if connection.properties.target.endswith("/") + else connection.properties.target + ) self.key: str = None - if hasattr(connection.properties, 'credentials'): - if hasattr(connection.properties.credentials, 'key'): - self.key=connection.properties.credentials.key - self.token_credential=token_credential + if hasattr(connection.properties, "credentials"): + if hasattr(connection.properties.credentials, "key"): + self.key = connection.properties.credentials.key + self.token_credential = token_credential def __str__(self): out = "{\n" - out += f" \"name\": \"{self.name}\",\n" - out += f" \"authentication_type\": \"{self.authentication_type}\",\n" - out += f" \"endpoint_type\": \"{self.endpoint_type}\",\n" - out += f" \"endpoint_url\": \"{self.endpoint_url}\",\n" - out += f" \"key\": \"{self.key}\",\n" + out += f' "name": "{self.name}",\n' + out += f' "authentication_type": "{self.authentication_type}",\n' + out += f' "endpoint_type": "{self.endpoint_type}",\n' + out += f' "endpoint_url": 
"{self.endpoint_url}",\n' + out += f' "key": "{self.key}",\n' if self.token_credential: access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") - out += f" \"token_credential\": \"{access_token.token}\", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n" + out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' else: - out += f" \"token_credential\": \"null\"\n" + out += f' "token_credential": "null"\n' out += "}\n" return out __all__: List[str] = [] # Add all objects you want publicly available to users at this package level + def patch_sdk(): """Do not remove from this file. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index 6e2dd3e8d726..4c4fe956f18f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from ._operations import EndpointsOperations -from ._operations import AssistantsOperations +from ._operations import AgentsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -16,7 +16,7 @@ __all__ = [ "EndpointsOperations", - "AssistantsOperations", + "AgentsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 876bc9a9289f..16413b3cd7c6 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -9,7 +9,8 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Type, TypeVar, Union, overload +import urllib.parse from azure.core.exceptions import ( ClientAuthenticationError, @@ -21,6 +22,7 @@ StreamConsumedError, map_error, ) +from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.rest import HttpRequest, HttpResponse from azure.core.tracing.decorator import distributed_trace @@ -89,6 +91,113 @@ def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def build_evaluations_evaluations_get_request( # pylint: disable=name-too-long + id: str, *, api_version: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, 
**kwargs) + + +def build_evaluations_evaluations_create_request( # pylint: disable=name-too-long + *, api_version: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/create" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_evaluations_list_request( # pylint: disable=name-too-long + *, + api_version: str, + top: Optional[int] = None, + skip: Optional[int] = None, + maxpagesize: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_evaluations_update_request( # pylint: disable=name-too-long + id: str, *, api_version: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + class EndpointsOperations: """ .. warning:: @@ -107,11 +216,11 @@ def __init__(self, *args, **kwargs): self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: + def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # pylint: disable=protected-access """List the details of all the connections (not including their credentials). :return: ConnectionsListResponse. 
The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListResponse + :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object @@ -125,7 +234,9 @@ def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.ConnectionsListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop( # pylint: disable=protected-access + "cls", None + ) _request = build_endpoints_list_request( api_version=self._config.api_version, @@ -133,6 +244,7 @@ def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: params=_params, ) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -160,7 +272,9 @@ def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ConnectionsListResponse, response.json()) + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -168,26 +282,11 @@ def _list(self, **kwargs: Any) -> _models.ConnectionsListResponse: return deserialized # type: ignore @overload - def _list_secrets( + def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - + ) -> _models._models.ConnectionsListSecretsResponse: ... @overload - def _list_secrets( + def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, *, @@ -198,52 +297,14 @@ def _list_secrets( api_version_in_body: str, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :keyword connection_name: Connection Name (should be the same as the connection name in the URL - path). Required. - :paramtype connection_name: str - :keyword subscription_id: The ID of the target subscription. Required. - :paramtype subscription_id: str - :keyword resource_group_name: The name of the Resource Group. Required. 
- :paramtype resource_group_name: str - :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. - :paramtype workspace_name: str - :keyword api_version_in_body: The api version. Required. - :paramtype api_version_in_body: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - + ) -> _models._models.ConnectionsListSecretsResponse: ... @overload - def _list_secrets( + def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). - - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ + ) -> _models._models.ConnectionsListSecretsResponse: ... @distributed_trace - def _list_secrets( + def _list_secrets( # pylint: disable=protected-access self, connection_name_in_url: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -254,7 +315,7 @@ def _list_secrets( workspace_name: str = _Unset, api_version_in_body: str = _Unset, **kwargs: Any - ) -> _models.ConnectionsListSecretsResponse: + ) -> _models._models.ConnectionsListSecretsResponse: """Get the details of a single connection, including credential (if available). :param connection_name_in_url: Connection Name. Required. @@ -274,7 +335,7 @@ def _list_secrets( :paramtype api_version_in_body: str :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ConnectionsListSecretsResponse + :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object @@ -289,7 +350,9 @@ def _list_secrets( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop( # pylint: disable=protected-access + "cls", None + ) if body is _Unset: if connection_name is _Unset: @@ -326,6 +389,7 @@ def _list_secrets( params=_params, ) path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -353,7 +417,9 @@ def _list_secrets( if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.ConnectionsListSecretsResponse, response.json()) + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -361,14 +427,14 @@ def _list_secrets( return deserialized # type: ignore -class AssistantsOperations: +class AgentsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.client.Client`'s - :attr:`assistants` attribute. + :attr:`agents` attribute. """ def __init__(self, *args, **kwargs): @@ -395,3 +461,449 @@ def __init__(self, *args, **kwargs): self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + self.evaluations = EvaluationsEvaluationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + +class EvaluationsEvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.Client`'s + :attr:`evaluations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Get an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, body: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. + :type body: ~azure.ai.client.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + """Creates an evaluation. + + :param body: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type body: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_evaluations_create_request( + api_version=self._config.api_version, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.Evaluation"]: + """List evaluations. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. 
+ :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_evaluations_list_request( + api_version=self._config.api_version, + top=top, + skip=skip, + maxpagesize=maxpagesize, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @overload + def update( + self, id: str, body: _models.UpdateEvaluationRequest, *, content_type: str = "application/json", **kwargs: 
Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: ~azure.ai.client.models.UpdateEvaluationRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update( + self, id: str, body: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Evaluation: + """Update an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param body: Update evaluation request. Is one of the following types: UpdateEvaluationRequest, + JSON, IO[bytes] Required. + :type body: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_evaluations_update_request( + id=id, + api_version=self._config.api_version, + content_type=content_type, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 5b7b306e4e12..00b34dc560d8 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -7,20 +7,17 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ from typing import List, Iterable -#from zoneinfo import ZoneInfo + +# from zoneinfo import ZoneInfo from ._operations import EndpointsOperations as EndpointsOperationsGenerated from ..models._enums import AuthenticationType, EndpointType from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse from ..models._patch import EndpointProperties + class EndpointsOperations(EndpointsOperationsGenerated): - def get_default( - self, - *, - endpoint_type: EndpointType, - populate_secrets: bool = False - ) -> EndpointProperties: + def get_default(self, *, endpoint_type: EndpointType, populate_secrets: bool = False) -> 
EndpointProperties: if not endpoint_type: raise ValueError("You must specify an endpoint type") endpoint_properties_list = self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets) @@ -30,13 +27,7 @@ def get_default( else: return None - - def get( - self, - *, - endpoint_name: str, - populate_secrets: bool = False - ) -> ConnectionsListSecretsResponse: + def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> ConnectionsListSecretsResponse: if not endpoint_name: raise ValueError("Endpoint name cannot be empty") if populate_secrets: @@ -52,13 +43,15 @@ def get( return EndpointProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from .._patch import SASTokenCredential + token_credential = SASTokenCredential( sas_token=connection.properties.credentials.sas, credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, workspace_name=self._config.workspace_name, - connection_name=endpoint_name) + connection_name=endpoint_name, + ) return EndpointProperties(connection=connection, token_credential=token_credential) return EndpointProperties(connection=connection) @@ -70,10 +63,7 @@ def get( return None def list( - self, - *, - endpoint_type: EndpointType | None = None, - populate_secrets: bool = False + self, *, endpoint_type: EndpointType | None = None, populate_secrets: bool = False ) -> Iterable[EndpointProperties]: # First make a REST call to /list to get all the connections, without secrets diff --git a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py new file mode 100644 index 000000000000..13e69dbd88c1 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import ClientTestBase, Preparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before running it") +class TestEvaluationsOperations(ClientTestBase): + @Preparer() + @recorded_by_proxy + def test_evaluations_evaluations_get(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.evaluations.evaluations.get( + id="str", + ) + + # please add your own response checks here + # ...
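+        # For example, a hedged sketch of possible checks (hypothetical field names; the actual Evaluation model returned by the service may differ): + # assert response.id == "str" + # assert response.data is not None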
+ + @Preparer() + @recorded_by_proxy + def test_evaluations_evaluations_create(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.evaluations.evaluations.create( + body={ + "data": "input_data", + "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, + "description": "str", + "displayName": "str", + "id": "str", + "properties": {"str": "str"}, + "status": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + }, + "tags": {"str": "str"}, + }, + ) + + # please add your own response checks here + # ... + + @Preparer() + @recorded_by_proxy + def test_evaluations_evaluations_list(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.evaluations.evaluations.list() + result = [r for r in response] + # please add your own response checks here + # ... + + @Preparer() + @recorded_by_proxy + def test_evaluations_evaluations_update(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.evaluations.evaluations.update( + id="str", + body={"description": "str", "displayName": "str", "tags": {"str": "str"}}, + ) + + # please add your own response checks here + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py new file mode 100644 index 000000000000..345d84c7d13d --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import Preparer +from testpreparer_async import ClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before running it") +class TestEvaluationsOperationsAsync(ClientTestBaseAsync): + @Preparer() + @recorded_by_proxy_async + async def test_evaluations_evaluations_get(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.evaluations.evaluations.get( + id="str", + ) + + # please add your own response checks here + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_evaluations_evaluations_create(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.evaluations.evaluations.create( + body={ + "data": "input_data", + "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, + "description": "str", + "displayName": "str", + "id": "str", + "properties": {"str": "str"}, + "status": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + }, + "tags": {"str": "str"}, + }, + ) + + # please add your own response checks here + # ...
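+        # For example, a hedged sketch of possible checks (hypothetical assertions, assuming the service echoes back the created Evaluation): + # assert response.id is not None + # assert response.display_name == "str"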
+ + @Preparer() + @recorded_by_proxy_async + async def test_evaluations_evaluations_list(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = client.evaluations.evaluations.list() + result = [r async for r in response] + # please add your own response checks here + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_evaluations_evaluations_update(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.evaluations.evaluations.update( + id="str", + body={"description": "str", "displayName": "str", "tags": {"str": "str"}}, + ) + + # please add your own response checks here + # ... diff --git a/sdk/ai/azure-ai-client/requirements.txt b/sdk/ai/azure-ai-client/requirements.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index fe28866100cd..c24551396ce0 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,5 +1,4 @@ -""" # These are needed for SDK logging. You can ignore them. import sys import logging @@ -7,7 +6,6 @@ logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) # End of logging setup -""" import os from azure.ai.client import AzureAIClient @@ -20,22 +18,24 @@ # Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format "<AzureSubscriptionId>;<ResourceGroup>;<WorkspaceName>" +# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<WorkspaceName>" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_STUDIO_PROJECT_CONNECTION_STRING"], - #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly +""" ai_client = AzureAIClient( credential=DefaultAzureCredential(), - subscription_id=os.environ["AZURE_SUBSCRIPTION"], - resource_group_name=os.environ["AZURE_RESOURCE_GROUP"], - workspace_name=os.environ["AI_STUDIO_HUB"], - #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) - +""" # You can list all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( @@ -57,7 +57,7 @@ # You can get an endpoint by its name: endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_STUDIO_CONNECTION_1"], # Required.
populate_secrets=True ) print("====> Print properties of a particular endpoint:") diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py index ddb532514155..07a021d58fb4 100644 --- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py +++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py @@ -9,7 +9,7 @@ from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError -#import azure.ai.client as sdk +# import azure.ai.client as sdk # Set to True to enable SDK logging LOGGING_ENABLED = True @@ -24,13 +24,15 @@ handler = logging.StreamHandler(stream=sys.stdout) logger.addHandler(handler) + class FakeTokenCredential(TokenCredential): def get_token(self, *scopes, **kwargs): # Create a fake token with an expiration time token = "fake_token" expires_on = datetime.datetime.now() + datetime.timedelta(hours=1) return AccessToken(token, expires_on.timestamp()) - + + # The test class name needs to start with "Test" to get collected by pytest class TestUnit: @@ -49,12 +51,11 @@ def test_sas_token_credential_class_mocked(self, **kwargs): token_duration_sec = 5 secret_key = "my_secret_key" token_duration_sec = 5 - sas_token_expiration: datetime = datetime.datetime.now(datetime.timezone.utc) + \ - datetime.timedelta(seconds=token_duration_sec) + sas_token_expiration: datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( + seconds=token_duration_sec + ) sas_token_expiration = sas_token_expiration.replace(microsecond=0) - payload = { - 'exp': sas_token_expiration - } + payload = {"exp": sas_token_expiration} sas_token = jwt.encode(payload, secret_key) # You can parse the token string on https://jwt.ms/. The "exp" value there is the @@ -70,12 +71,12 @@ def test_sas_token_credential_class_mocked(self, **kwargs): print(f"Generated JWT token: {sas_token}") sas_token_credential = SASTokenCredential( - sas_token = sas_token, - credential = FakeTokenCredential(), - subscription_id = "fake_subscription_id", - resource_group_name = "fake_resource_group", - workspace_name = "fake_workspace_name", - connection_name = "fake_connection_name" + sas_token=sas_token, + credential=FakeTokenCredential(), + subscription_id="fake_subscription_id", + resource_group_name="fake_resource_group", + workspace_name="fake_workspace_name", + connection_name="fake_connection_name", ) assert sas_token_credential._expires_on == sas_token_expiration @@ -90,24 +91,23 @@ def test_sas_token_credential_class_mocked(self, **kwargs): print(e) assert exception_caught - # Unit tests for the SASTokenCredential class def test_sas_token_credential_class_real(self, **kwargs): # Example of real SAS token for AOAI service. You can parse it on https://jwt.ms/. 
The "exp" value there is the # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC) token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ" - expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. See https://www.epochconverter.com/ to convert to date & time + expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. See https://www.epochconverter.com/ to convert to date & time expiration_datatime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc) print(f"\n[TEST] Expected expiration date: {expiration_datatime_utc}") sas_token_credential = SASTokenCredential( - sas_token = token, - credential = None, - subscription_id = None, - resource_group_name = None, - workspace_name = None, - connection_name = None + sas_token=token, + credential=None, + subscription_id=None, + resource_group_name=None, + workspace_name=None, + connection_name=None, ) print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 17c143ca684b..2c6898474a6b 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: fa158c4ec75da8d4c5abb2aa3ca503372b465815 +commit: 5792edd5d87a7fae361e5bcc5852091e7bb4c264 repo: Azure/azure-rest-api-specs additionalDirectories: From 5b9ecad7234924d191e380c120ecf84432c3cf27 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 3 Oct 2024 00:15:19 -0700 Subject: [PATCH 007/138] Fix construction of the three endpoints --- sdk/ai/azure-ai-client/azure/ai/client/_patch.py | 12 ++++++------ .../samples/endpoints/sample_endpoints.py | 5 +++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 49799ba2d1c0..dbc6980ee761 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -37,7 +37,7 @@ def __init__( kwargs3 = kwargs.copy() # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = "https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config1 = ClientConfiguration( host_name=host_name, subscription_id=subscription_id, @@ -45,7 
+45,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", - credential_scopes="https://management.azure.com/.default", + credential_scopes="https://management.azure.com", **kwargs1 ) _policies1 = kwargs1.pop("policies", None) @@ -68,7 +68,7 @@ def __init__( self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations - _endpoint2 = "https://{host_hame}/assistants/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + _endpoint2 = f"https://{host_name}/assistants/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config2 = ClientConfiguration( host_name=host_name, subscription_id=subscription_id, @@ -76,7 +76,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes="https://ml.azure.com/.default", + credential_scopes="https://ml.azure.com", **kwargs2 ) _policies2 = kwargs2.pop("policies", None) @@ -99,7 +99,7 @@ def __init__( self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations - _endpoint3 = "https://{host_name}/raisvc/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}" # pylint: disable=line-too-long + _endpoint3 = f"https://{host_name}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config3 = ClientConfiguration( host_name=host_name, subscription_id=subscription_id, @@ -107,7 +107,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes="https://ml.azure.com/.default", + credential_scopes="https://ml.azure.com", **kwargs3 ) _policies3 = kwargs3.pop("policies", None) diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index c24551396ce0..a9372a3c684b 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,4 +1,5 @@ +""" # These are needed for SDK logging. You can ignore them. import sys import logging @@ -6,7 +7,7 @@ logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) # End of logging setup - +""" import os from azure.ai.client import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType @@ -22,7 +23,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=os.environ["AI_CLIENT_CONNECTION_STRING"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + #logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly From 8d332531f45fac1da5ca08a78f9593ad13539a3f Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 3 Oct 2024 10:27:13 -0700 Subject: [PATCH 008/138] Re-emit from TypeSpec. Rename host_name to endpoint --- .../azure/ai/client/_client.py | 19 ++++---- .../azure/ai/client/_configuration.py | 19 ++++---- .../azure-ai-client/azure/ai/client/_patch.py | 48 ++++++++++++------- .../azure/ai/client/aio/_client.py | 19 ++++---- .../azure/ai/client/aio/_configuration.py | 19 ++++---- .../ai/client/aio/operations/_operations.py | 14 +++--- .../azure/ai/client/operations/_operations.py | 14 +++--- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 8 files changed, 87 insertions(+), 67 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index 5406c3e3bb6d..5c4c8291d24c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -32,11 +32,12 @@ class Client: # pylint: disable=client-accepts-api-version-keyword :vartype agents: azure.ai.client.operations.AgentsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.operations.EvaluationsOperations - :param host_name: The Azure AI Studio project host name, in the format - ``<region>.api.azureml.ms`` or ``<private-link-guid>.<region>.api.azureml.ms``\\\\ - , where :code:`<region>` is the Azure region where the project is deployed (e.g. westus) - and :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. - :type host_name: str + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\\\ , where + :code:`<region>` is the Azure region where the project is deployed (e.g. westus) and + :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. + :type endpoint: str :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required.
@@ -53,16 +54,16 @@ class Client: # pylint: disable=client-accepts-api-version-keyword def __init__( self, - host_name: str, + endpoint: str, subscription_id: str, resource_group_name: str, workspace_name: str, credential: "TokenCredential", **kwargs: Any ) -> None: - _endpoint = "https://{hostName}/{subscriptionId}/{resourceGroupName}/{workspaceName}" + _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" self._config = ClientConfiguration( - host_name=host_name, + endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, @@ -115,7 +116,7 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: request_copy = deepcopy(request) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py index 0be11b6382d1..0cdfc43e60a2 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -23,11 +23,12 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param host_name: The Azure AI Studio project host name, in the format - ``<region>.api.azureml.ms`` or ``<private-link-guid>.<region>.api.azureml.ms``\\ , - where :code:`<region>` is the Azure region where the project is deployed (e.g. westus) - and :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. - :type host_name: str + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\ , where :code:`<region>` + is the Azure region where the project is deployed (e.g. westus) and :code:`<private-link-guid>` + is the GUID of the Enterprise private link. Required. + :type endpoint: str :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required.
@@ -44,7 +45,7 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes def __init__( self, - host_name: str, + endpoint: str, subscription_id: str, resource_group_name: str, workspace_name: str, @@ -53,8 +54,8 @@ def __init__( ) -> None: api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - if host_name is None: - raise ValueError("Parameter 'host_name' must not be None.") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: @@ -64,7 +65,7 @@ def __init__( if credential is None: raise ValueError("Parameter 'credential' must not be None.") - self.host_name = host_name + self.endpoint = endpoint self.subscription_id = subscription_id self.resource_group_name = resource_group_name self.workspace_name = workspace_name diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index dbc6980ee761..ce3f47e505e6 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -21,17 +21,34 @@ logger = logging.getLogger(__name__) + class AzureAIClient(ClientGenerated): def __init__( self, - host_name: str, + endpoint: str, subscription_id: str, resource_group_name: str, workspace_name: str, credential: "TokenCredential", - **kwargs: Any + **kwargs: Any, ) -> None: + # TODO: Validate input formats with regex match (e.g. subscription ID) + if not endpoint: + raise ValueError("endpoint is required") + if not subscription_id: + raise ValueError("subscription_id is required") + if not resource_group_name: + raise ValueError("resource_group_name is required") + if not workspace_name: + raise ValueError("workspace_name is required") + if not credential: + raise ValueError("credential is required") + if "api_version" in kwargs: + raise ValueError("No support for overriding the API version") + if "credential_scopes" in kwargs: + raise ValueError("No support for overriding the credential scopes") + kwargs1 = kwargs.copy() kwargs2 = kwargs.copy() kwargs3 = kwargs.copy() @@ -39,14 +56,14 @@ def __init__( # For Endpoints operations (enumerating connections, getting SAS tokens) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config1 = ClientConfiguration( - host_name=host_name, + endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", credential_scopes="https://management.azure.com", - **kwargs1 + **kwargs1, ) _policies1 = kwargs1.pop("policies", None) if _policies1 is None: @@ -68,16 +85,16 @@ def __init__( self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations - _endpoint2 = f"https://{host_name}/assistants/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint2 = f"{endpoint}/assistants/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config2 = ClientConfiguration( -
host_name=host_name, + endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, credential=credential, - api_version="2024-07-01-preview", # TODO: Update me + api_version="2024-07-01-preview", # TODO: Update me credential_scopes="https://ml.azure.com", - **kwargs2 + **kwargs2, ) _policies2 = kwargs2.pop("policies", None) if _policies2 is None: @@ -99,16 +116,16 @@ def __init__( self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations - _endpoint3 = f"https://{host_name}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config3 = ClientConfiguration( - host_name=host_name, + endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, credential=credential, - api_version="2024-07-01-preview", # TODO: Update me + api_version="2024-07-01-preview", # TODO: Update me credential_scopes="https://ml.azure.com", - **kwargs3 + **kwargs3, ) _policies3 = kwargs3.pop("policies", None) if _policies3 is None: @@ -137,7 +154,6 @@ def __init__( self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - @classmethod def from_connection_string(cls, connection: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": """ @@ -150,11 +166,11 @@ def from_connection_string(cls, connection: str, credential: "TokenCredential", parts = connection.split(";") if len(parts) != 4: raise ValueError("Invalid connection string format") - host_name = parts[0] + endpoint = parts[0] subscription_id = parts[1] resource_group_name = parts[2] workspace_name = parts[3] - return cls(host_name, subscription_id, resource_group_name, workspace_name, credential, **kwargs) + return cls(endpoint, subscription_id, resource_group_name, workspace_name, credential, **kwargs) class SASTokenCredential(TokenCredential): @@ -166,7 +182,7 @@ def __init__( subscription_id: str, resource_group_name: str, workspace_name: str, - connection_name: str + connection_name: str, ): self._sas_token = sas_token self._credential = credential diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 9a3b6a454a59..37743361ac82 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -32,11 +32,12 @@ class Client: # pylint: disable=client-accepts-api-version-keyword :vartype agents: azure.ai.client.aio.operations.AgentsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations - :param host_name: The Azure AI Studio project host name, in the format - ``<region>.api.azureml.ms`` or ``<private-link-guid>.<region>.api.azureml.ms``\\\\ - , where :code:`<region>` is the Azure region where the project is deployed (e.g. westus) - and :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
- :type host_name: str + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\\\ , where + :code:`<region>` is the Azure region where the project is deployed (e.g. westus) and + :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. + :type endpoint: str :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required. @@ -53,16 +54,16 @@ class Client: # pylint: disable=client-accepts-api-version-keyword def __init__( self, - host_name: str, + endpoint: str, subscription_id: str, resource_group_name: str, workspace_name: str, credential: "AsyncTokenCredential", **kwargs: Any ) -> None: - _endpoint = "https://{hostName}/{subscriptionId}/{resourceGroupName}/{workspaceName}" + _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" self._config = ClientConfiguration( - host_name=host_name, + endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, @@ -117,7 +118,7 @@ def send_request( request_copy = deepcopy(request) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py index 6b0116f55f8d..d085552a2ed4 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -23,11 +23,12 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes Note that all parameters used to create this instance are saved as instance attributes. - :param host_name: The Azure AI Studio project host name, in the format - ``<region>.api.azureml.ms`` or ``<private-link-guid>.<region>.api.azureml.ms``\\ , - where :code:`<region>` is the Azure region where the project is deployed (e.g. westus) - and :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. - :type host_name: str + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\ , where :code:`<region>` + is the Azure region where the project is deployed (e.g. westus) and :code:`<private-link-guid>` + is the GUID of the Enterprise private link. Required. + :type endpoint: str :param subscription_id: The Azure subscription ID. Required. :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required.
@@ -44,7 +45,7 @@ class ClientConfiguration: # pylint: disable=too-many-instance-attributes def __init__( self, - host_name: str, + endpoint: str, subscription_id: str, resource_group_name: str, workspace_name: str, @@ -53,8 +54,8 @@ def __init__( ) -> None: api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - if host_name is None: - raise ValueError("Parameter 'host_name' must not be None.") + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: @@ -64,7 +65,7 @@ def __init__( if credential is None: raise ValueError("Parameter 'credential' must not be None.") - self.host_name = host_name + self.endpoint = endpoint self.subscription_id = subscription_id self.resource_group_name = resource_group_name self.workspace_name = workspace_name diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index 67ddf7d3bfa3..f04f7a6ef2cb 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -96,7 +96,7 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -241,7 +241,7 @@ async def _list_secrets( # pylint: disable=protected-access params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -366,7 +366,7 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -487,7 +487,7 @@ async def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -562,7 +562,7 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "hostName": 
self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( "self._config.subscription_id", self._config.subscription_id, "str" ), @@ -589,7 +589,7 @@ def prepare_request(next_link=None): "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( "self._config.subscription_id", self._config.subscription_id, "str" ), @@ -727,7 +727,7 @@ async def update( params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 16413b3cd7c6..66a402f492c9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -244,7 +244,7 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -389,7 +389,7 @@ def _list_secrets( # pylint: disable=protected-access params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -514,7 +514,7 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -633,7 +633,7 @@ def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": 
self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" @@ -708,7 +708,7 @@ def prepare_request(next_link=None): params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( "self._config.subscription_id", self._config.subscription_id, "str" ), @@ -735,7 +735,7 @@ def prepare_request(next_link=None): "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url( "self._config.subscription_id", self._config.subscription_id, "str" ), @@ -873,7 +873,7 @@ def update( params=_params, ) path_format_arguments = { - "hostName": self._serialize.url("self._config.host_name", self._config.host_name, "str"), + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 2c6898474a6b..352e32d6e00e 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 5792edd5d87a7fae361e5bcc5852091e7bb4c264 +commit: e64174ba13dd23ee492bea01c6a6d61871be63e6 repo: Azure/azure-rest-api-specs additionalDirectories: From 46407141f7b6b613cb8be77fd1f203b741f44e4f Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 3 Oct 2024 11:32:54 -0700 Subject: [PATCH 009/138] Bug fix --- sdk/ai/azure-ai-client/azure/ai/client/_patch.py | 6 +++--- .../samples/endpoints/sample_endpoints.py | 11 ++++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index ce3f47e505e6..5ea845580e5c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -62,7 +62,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", - credential_scopes="https://management.azure.com", + credential_scopes=["https://management.azure.com"], **kwargs1, ) _policies1 = kwargs1.pop("policies", None) @@ -93,7 +93,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes="https://ml.azure.com", + credential_scopes=["https://ml.azure.com"], **kwargs2, ) _policies2 = kwargs2.pop("policies", None) @@ -124,7 +124,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes="https://ml.azure.com", + credential_scopes=["https://ml.azure.com"], **kwargs3, ) _policies3 = kwargs3.pop("policies", None) diff --git 
a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index a9372a3c684b..1eba47731dc0 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -17,26 +17,23 @@ from azure.identity import DefaultAzureCredential, get_bearer_token_provider from azure.core.credentials import AzureKeyCredential - # Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<WorkspaceName>" +# At the moment, it should be in the format "<Endpoint>;<AzureSubscriptionId>;<ResourceGroup>;<WorkspaceName>" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=os.environ["AI_CLIENT_CONNECTION_STRING"], - #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly -""" ai_client = AzureAIClient( credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], + endpoint=os.environ["AI_CLIENT_ENDPOINT"], subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) -""" # You can list all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( @@ -58,7 +55,7 @@ # You can get an endpoint by its name: endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], # Required. + endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], # Required.
populate_secrets=True ) print("====> Print properties of a particular endpoint:") From 9fdbccbbb4fedd515b4342381942ab642917fde6 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 3 Oct 2024 22:32:56 -0700 Subject: [PATCH 010/138] Add methods ai_client.inference.get_chat_completions_client() and ai_client.inference.get_azure_openai_client() --- .../azure-ai-client/azure/ai/client/_patch.py | 2 + .../azure/ai/client/operations/__init__.py | 1 + .../azure/ai/client/operations/_patch.py | 97 ++++++++++++++++++- .../samples/endpoints/sample_endpoints.py | 33 ++++++- 4 files changed, 128 insertions(+), 5 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 5ea845580e5c..09cb8d66d7f5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -17,6 +17,7 @@ from ._configuration import ClientConfiguration from ._serialization import Deserializer, Serializer from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from .operations._patch import InferenceOperations from ._client import Client as ClientGenerated logger = logging.getLogger(__name__) @@ -153,6 +154,7 @@ def __init__( self.endpoints = EndpointsOperations(self._client1, self._config1, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) + self.inference = InferenceOperations(self) @classmethod def from_connection_string(cls, connection: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index 4c4fe956f18f..4f2383d40c3e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -18,6 +18,7 @@ "EndpointsOperations", "AgentsOperations", "EvaluationsOperations", + "InferenceOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 00b34dc560d8..ae8654c7bd9e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -6,6 +6,8 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import logging + from typing import List, Iterable # from zoneinfo import ZoneInfo @@ -14,6 +16,98 @@ from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse from ..models._patch import EndpointProperties +logger = logging.getLogger(__name__) + +class InferenceOperations(): + + def __init__(self, outer_instance): + self.outer_instance = outer_instance + + + def get_chat_completions_client(self) -> "ChatCompletionsClient": + endpoint = self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.SERVERLESS, + populate_secrets=True + ) + if not endpoint: + raise ValueError("No serverless endpoint found") + + try: + from azure.ai.inference import ChatCompletionsClient + except ModuleNotFoundError as _: + raise ModuleNotFoundError("Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'") + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication") + from azure.core.credentials import AzureKeyCredential + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, + credential=AzureKeyCredential(endpoint.key) + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, + credential=endpoint.token_credential + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication") + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, + credential=endpoint.token_credential + ) + else: + raise ValueError("Unknown authentication type") + + return client + + + def get_azure_openai_client(self) -> "AzureOpenAI": + endpoint = self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.AZURE_OPEN_AI, + populate_secrets=True + ) + if not endpoint: + raise ValueError("No Azure OpenAI endpoint found") + + try: + from openai import AzureOpenAI + except ModuleNotFoundError as _: + raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication") + client = AzureOpenAI( + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", # TODO: Is this needed? + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication") + try: + from azure.identity import get_bearer_token_provider + except ModuleNotFoundError as _: + raise ModuleNotFoundError("azure.identity package not installed. 
Please install it using 'pip install azure-identity'") + client = AzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") + try: + # Import here as well: the Entra ID branch above may not have run, so the name may be unbound. + from azure.identity import get_bearer_token_provider + except ModuleNotFoundError as _: + raise ModuleNotFoundError("azure.identity package not installed. Please install it using 'pip install azure-identity'") + client = AzureOpenAI( + azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + else: + raise ValueError("Unknown authentication type") + + return client + class EndpointsOperations(EndpointsOperationsGenerated): @@ -82,7 +176,8 @@ def list( __all__: List[str] = [ - "EndpointsOperations" + "EndpointsOperations", + "InferenceOperations" ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index 1eba47731dc0..270230ca0344 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,5 +1,4 @@ -""" # These are needed for SDK logging. You can ignore them. import sys import logging @@ -7,7 +6,7 @@ logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) # End of logging setup -""" + import os from azure.ai.client import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType @@ -22,7 +21,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=os.environ["AI_CLIENT_CONNECTION_STRING"], - #logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly @@ -35,6 +34,32 @@ logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) +# You can get an authenticated azure.ai.inference client directly, if you have a serverless endpoint in your project: +client = ai_client.inference.get_chat_completions_client() + +response = client.complete( + messages=[ + UserMessage(content="How many feet are in a mile?") + ] +) + +print(response.choices[0].message.content) + +# You can get an authenticated AzureOpenAI client directly, if you have an Azure OpenAI endpoint in your project: +client = ai_client.inference.get_azure_openai_client() + +response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], +) + +print(response.choices[0].message.content) + # You can list all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. 
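To see how the two helpers introduced in this patch fit together end to end, here is a minimal sketch of the intended call pattern (illustrative only, not part of the patch; it assumes the AI_CLIENT_CONNECTION_STRING environment variable used by the sample above is set, and that the optional azure-ai-inference and azure-identity packages are installed):

import os

from azure.ai.client import AzureAIClient
from azure.ai.inference.models import UserMessage
from azure.identity import DefaultAzureCredential

# One connection string carries the endpoint, subscription, resource group and workspace.
ai_client = AzureAIClient.from_connection_string(
    credential=DefaultAzureCredential(),
    connection=os.environ["AI_CLIENT_CONNECTION_STRING"],  # assumed to be set
)

# get_chat_completions_client() looks up the default serverless endpoint and picks
# API-key, Entra ID, or SAS credentials based on its authentication_type, so the
# caller never constructs a credential for the inference client by hand.
chat_client = ai_client.inference.get_chat_completions_client()
response = chat_client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
print(response.choices[0].message.content)

# get_azure_openai_client() performs the same lookup for the project's Azure OpenAI endpoint.
openai_client = ai_client.inference.get_azure_openai_client()

The design choice here is that credential selection lives in the operations class, keyed off the endpoint's authentication_type, rather than in every sample.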
@@ -106,7 +131,7 @@ print("====> Creating ChatCompletionsClient using API key authentication") client = ChatCompletionsClient( endpoint=endpoint.endpoint_url, - credential=AzureKeyCredential(endpoint.properties.credentials.key) + credential=AzureKeyCredential(endpoint.key) ) elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth From b1a921560a56af684e5ced76c57dbbf66ead36a3 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:34:37 -0700 Subject: [PATCH 011/138] Adding sample and changing client name (#37724) --- .../azure/ai/client/__init__.py | 4 +- .../azure/ai/client/_client.py | 8 +- .../azure/ai/client/_configuration.py | 4 +- .../azure-ai-client/azure/ai/client/_patch.py | 6 +- .../azure/ai/client/aio/__init__.py | 4 +- .../azure/ai/client/aio/_client.py | 8 +- .../azure/ai/client/aio/_configuration.py | 4 +- .../ai/client/aio/operations/_operations.py | 247 +++++++------- .../azure/ai/client/models/__init__.py | 6 - .../azure/ai/client/models/_models.py | 6 +- .../azure/ai/client/operations/_operations.py | 308 ++++++++---------- .../generated_tests/conftest.py | 16 +- .../test_azure_ai_evaluations_operations.py | 71 ++++ ...t_azure_ai_evaluations_operations_async.py | 72 ++++ .../generated_tests/testpreparer.py | 10 +- .../generated_tests/testpreparer_async.py | 8 +- .../samples/endpoints/sample_endpoints.py | 41 +-- .../samples/evaluations/sample_evaluations.py | 95 ++++++ sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 19 files changed, 554 insertions(+), 366 deletions(-) create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py create mode 100644 sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py index 7b2a5f45e5ef..a8002a0739d5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from ._client import Client +from ._client import AzureAIClient from ._version import VERSION __version__ = VERSION @@ -19,7 +19,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "Client", + "AzureAIClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index 5c4c8291d24c..4bec476e91db 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -14,7 +14,7 @@ from azure.core.pipeline import policies from azure.core.rest import HttpRequest, HttpResponse -from ._configuration import ClientConfiguration +from ._configuration import AzureAIClientConfiguration from ._serialization import Deserializer, Serializer from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations @@ -23,8 +23,8 @@ from azure.core.credentials import TokenCredential -class Client: # pylint: disable=client-accepts-api-version-keyword - """Client. +class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword + """AzureAIClient. 
:ivar endpoints: EndpointsOperations operations :vartype endpoints: azure.ai.client.operations.EndpointsOperations @@ -62,7 +62,7 @@ def __init__( **kwargs: Any ) -> None: _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" - self._config = ClientConfiguration( + self._config = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py index 0cdfc43e60a2..e6af9a8ee17e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials import TokenCredential -class ClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for Client. +class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AzureAIClient. Note that all parameters used to create this instance are saved as instance attributes. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 09cb8d66d7f5..e6c73772dbc9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -14,11 +14,11 @@ from azure.core.credentials import TokenCredential, AccessToken from azure.core import PipelineClient from azure.core.pipeline import policies -from ._configuration import ClientConfiguration +from ._configuration import AzureAIClientConfiguration as ClientConfiguration from ._serialization import Deserializer, Serializer from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from ._client import AzureAIClient as ClientGenerated from .operations._patch import InferenceOperations -from ._client import Client as ClientGenerated logger = logging.getLogger(__name__) @@ -125,7 +125,7 @@ def __init__( workspace_name=workspace_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com"], + credential_scopes=["https://management.azure.com"], # TODO: Update once service changes are ready **kwargs3, ) _policies3 = kwargs3.pop("policies", None) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py index 80bfbb6d392d..682d7f1b46a7 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py @@ -6,7 +6,7 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._client import Client +from ._client import AzureAIClient try: from ._patch import __all__ as _patch_all @@ -16,7 +16,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "Client", + "AzureAIClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 37743361ac82..7eea6e0e2690 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -15,7 +15,7 @@ from azure.core.rest import AsyncHttpResponse, HttpRequest from .._serialization import Deserializer, Serializer -from ._configuration import ClientConfiguration +from ._configuration import AzureAIClientConfiguration from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations if TYPE_CHECKING: @@ -23,8 +23,8 @@ from azure.core.credentials_async import AsyncTokenCredential -class Client: # pylint: disable=client-accepts-api-version-keyword - """Client. +class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword + """AzureAIClient. :ivar endpoints: EndpointsOperations operations :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations @@ -62,7 +62,7 @@ def __init__( **kwargs: Any ) -> None: _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" - self._config = ClientConfiguration( + self._config = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py index d085552a2ed4..7ba861c532db 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -17,8 +17,8 @@ from azure.core.credentials_async import AsyncTokenCredential -class ClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for Client. +class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AzureAIClient. Note that all parameters used to create this instance are saved as instance attributes. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index f04f7a6ef2cb..b5331cd78933 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -34,10 +34,10 @@ from ...operations._operations import ( build_endpoints_list_request, build_endpoints_list_secrets_request, - build_evaluations_evaluations_create_request, - build_evaluations_evaluations_get_request, - build_evaluations_evaluations_list_request, - build_evaluations_evaluations_update_request, + build_evaluations_create_request, + build_evaluations_get_request, + build_evaluations_list_request, + build_evaluations_update_request, ) if sys.version_info >= (3, 9): @@ -56,7 +56,7 @@ class EndpointsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.aio.Client`'s + :class:`~azure.ai.client.aio.AzureAIClient`'s :attr:`endpoints` attribute. 
""" @@ -285,7 +285,7 @@ class AgentsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.aio.Client`'s + :class:`~azure.ai.client.aio.AzureAIClient`'s :attr:`agents` attribute. """ @@ -303,7 +303,7 @@ class EvaluationsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.aio.Client`'s + :class:`~azure.ai.client.aio.AzureAIClient`'s :attr:`evaluations` attribute. """ @@ -314,101 +314,14 @@ def __init__(self, *args, **kwargs) -> None: self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - self.evaluations = EvaluationsEvaluationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - - -class EvaluationsEvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.aio.Client`'s - :attr:`evaluations` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Get an evaluation. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - @overload async def create( - self, body: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. - :type body: ~azure.ai.client.models.Evaluation + :param evaluation: Properties of Evaluation. Required. + :type evaluation: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -418,11 +331,13 @@ async def create( """ @overload - async def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + async def create( + self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. - :type body: JSON + :param evaluation: Properties of Evaluation. Required. + :type evaluation: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -433,12 +348,12 @@ async def create(self, body: JSON, *, content_type: str = "application/json", ** @overload async def create( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. - :type body: IO[bytes] + :param evaluation: Properties of Evaluation. Required. + :type evaluation: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -448,12 +363,12 @@ async def create( """ @distributed_trace_async - async def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + :param evaluation: Properties of Evaluation. Is one of the following types: Evaluation, JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] + :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. 
The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -474,14 +389,14 @@ async def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(evaluation, (IOBase, bytes)): + _content = evaluation else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_evaluations_create_request( - api_version=self._config.api_version, + _request = build_evaluations_create_request( content_type=content_type, + api_version=self._config.api_version, content=_content, headers=_headers, params=_params, @@ -553,11 +468,11 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_evaluations_evaluations_list_request( - api_version=self._config.api_version, + _request = build_evaluations_list_request( top=top, skip=skip, maxpagesize=maxpagesize, + api_version=self._config.api_version, headers=_headers, params=_params, ) @@ -630,14 +545,19 @@ async def get_next(next_link=None): @overload async def update( - self, id: str, body: _models.UpdateEvaluationRequest, *, content_type: str = "application/json", **kwargs: Any + self, + id: str, + update_request: _models.UpdateEvaluationRequest, + *, + content_type: str = "application/json", + **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: ~azure.ai.client.models.UpdateEvaluationRequest + :param update_request: Update evaluation request. Required. + :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -648,14 +568,14 @@ async def update( @overload async def update( - self, id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, id: str, update_request: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: JSON + :param update_request: Update evaluation request. Required. + :type update_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -666,14 +586,14 @@ async def update( @overload async def update( - self, id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, id: str, update_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: IO[bytes] + :param update_request: Update evaluation request. Required. + :type update_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". 
:paramtype content_type: str @@ -684,15 +604,15 @@ async def update( @distributed_trace_async async def update( - self, id: str, body: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + self, id: str, update_request: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Is one of the following types: UpdateEvaluationRequest, - JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :param update_request: Update evaluation request. Is one of the following types: + UpdateEvaluationRequest, JSON, IO[bytes] Required. + :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -713,15 +633,15 @@ async def update( content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(update_request, (IOBase, bytes)): + _content = update_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(update_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_evaluations_update_request( + _request = build_evaluations_update_request( id=id, - api_version=self._config.api_version, content_type=content_type, + api_version=self._config.api_version, content=_content, headers=_headers, params=_params, @@ -761,3 +681,68 @@ async def update( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace_async + async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Get an evaluation. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index e1e2d0284047..fecabd03f9ef 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -13,9 +13,6 @@ from ._models import InputData from ._models import SystemData from ._models import UpdateEvaluationRequest -from ._models import CredentialsSASAuth -from ._enums import AuthenticationType -from ._enums import EndpointType from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -28,9 +25,6 @@ "InputData", "SystemData", "UpdateEvaluationRequest", - "CredentialsSASAuth", - "AuthenticationType", - "EndpointType", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 1df6070a7b6e..822eb40c3233 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -35,7 +35,7 @@ class InputData(_model_base.Model): __mapping__: Dict[str, _model_base.Model] = {} type: str = rest_discriminator(name="type") """Discriminator property for InputData. Required. 
Default value is None.""" - id: str = rest_field() + id: str = rest_field(name="Uri") """Evaluation input data. Required.""" @overload @@ -249,6 +249,8 @@ class CredentialsSASAuth(_model_base.Model): class Dataset(InputData, discriminator="dataset"): """Dataset as source for evaluation. + Readonly variables are only populated by the server, and will be ignored when sending a request. + :ivar id: Evaluation input data. Required. :vartype id: str @@ -256,7 +258,7 @@ class Dataset(InputData, discriminator="dataset"): :vartype type: str """ - type: Literal["dataset"] = rest_discriminator(name="type") # type: ignore + type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"dataset\".""" @overload diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 66a402f492c9..8139efd97aac 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -91,38 +91,12 @@ def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_evaluations_get_request( # pylint: disable=name-too-long - id: str, *, api_version: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_evaluations_create_request( # pylint: disable=name-too-long - *, api_version: str, **kwargs: Any -) -> HttpRequest: +def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -139,17 +113,13 @@ def build_evaluations_evaluations_create_request( # pylint: disable=name-too-lo return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_evaluations_list_request( # pylint: disable=name-too-long - *, - api_version: str, - top: Optional[int] = None, - skip: Optional[int] = None, - maxpagesize: Optional[int] = None, - **kwargs: Any +def build_evaluations_list_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # 
Construct URL @@ -170,13 +140,12 @@ def build_evaluations_evaluations_list_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_evaluations_update_request( # pylint: disable=name-too-long - id: str, *, api_version: str, **kwargs: Any -) -> HttpRequest: +def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -198,13 +167,37 @@ def build_evaluations_evaluations_update_request( # pylint: disable=name-too-lo return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) +def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + class EndpointsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.Client`'s + :class:`~azure.ai.client.AzureAIClient`'s :attr:`endpoints` attribute. """ @@ -433,7 +426,7 @@ class AgentsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.Client`'s + :class:`~azure.ai.client.AzureAIClient`'s :attr:`agents` attribute. """ @@ -451,29 +444,7 @@ class EvaluationsOperations: **DO NOT** instantiate this class directly. Instead, you should access the following operations through - :class:`~azure.ai.client.Client`'s - :attr:`evaluations` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - self.evaluations = EvaluationsEvaluationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - - -class EvaluationsEvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.Client`'s + :class:`~azure.ai.client.AzureAIClient`'s :attr:`evaluations` attribute. 
""" @@ -484,79 +455,14 @@ def __init__(self, *args, **kwargs): self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - @distributed_trace - def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Get an evaluation. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - @overload def create( - self, body: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. - :type body: ~azure.ai.client.models.Evaluation + :param evaluation: Properties of Evaluation. Required. + :type evaluation: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -566,11 +472,11 @@ def create( """ @overload - def create(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. 
- :type body: JSON + :param evaluation: Properties of Evaluation. Required. + :type evaluation: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -580,11 +486,13 @@ def create(self, body: JSON, *, content_type: str = "application/json", **kwargs """ @overload - def create(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + def create( + self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Required. - :type body: IO[bytes] + :param evaluation: Properties of Evaluation. Required. + :type evaluation: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -594,12 +502,12 @@ def create(self, body: IO[bytes], *, content_type: str = "application/json", **k """ @distributed_trace - def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: """Creates an evaluation. - :param body: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + :param evaluation: Properties of Evaluation. Is one of the following types: Evaluation, JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] + :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -620,14 +528,14 @@ def create(self, body: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(evaluation, (IOBase, bytes)): + _content = evaluation else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_evaluations_create_request( - api_version=self._config.api_version, + _request = build_evaluations_create_request( content_type=content_type, + api_version=self._config.api_version, content=_content, headers=_headers, params=_params, @@ -699,11 +607,11 @@ def list( def prepare_request(next_link=None): if not next_link: - _request = build_evaluations_evaluations_list_request( - api_version=self._config.api_version, + _request = build_evaluations_list_request( top=top, skip=skip, maxpagesize=maxpagesize, + api_version=self._config.api_version, headers=_headers, params=_params, ) @@ -776,14 +684,19 @@ def get_next(next_link=None): @overload def update( - self, id: str, body: _models.UpdateEvaluationRequest, *, content_type: str = "application/json", **kwargs: Any + self, + id: str, + update_request: _models.UpdateEvaluationRequest, + *, + content_type: str = "application/json", + **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: ~azure.ai.client.models.UpdateEvaluationRequest + :param update_request: Update evaluation request. 
Required. + :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -794,14 +707,14 @@ def update( @overload def update( - self, id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, id: str, update_request: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: JSON + :param update_request: Update evaluation request. Required. + :type update_request: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -812,14 +725,14 @@ def update( @overload def update( - self, id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, id: str, update_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Required. - :type body: IO[bytes] + :param update_request: Update evaluation request. Required. + :type update_request: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -830,15 +743,15 @@ def update( @distributed_trace def update( - self, id: str, body: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + self, id: str, update_request: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any ) -> _models.Evaluation: """Update an evaluation. :param id: Identifier of the evaluation. Required. :type id: str - :param body: Update evaluation request. Is one of the following types: UpdateEvaluationRequest, - JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :param update_request: Update evaluation request. Is one of the following types: + UpdateEvaluationRequest, JSON, IO[bytes] Required. + :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -859,15 +772,15 @@ def update( content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(update_request, (IOBase, bytes)): + _content = update_request else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(update_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_evaluations_update_request( + _request = build_evaluations_update_request( id=id, - api_version=self._config.api_version, content_type=content_type, + api_version=self._config.api_version, content=_content, headers=_headers, params=_params, @@ -907,3 +820,68 @@ def update( return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace + def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Get an evaluation. 
+ + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/ai/azure-ai-client/generated_tests/conftest.py b/sdk/ai/azure-ai-client/generated_tests/conftest.py index a308f4a37a08..72c378f12508 100644 --- a/sdk/ai/azure-ai-client/generated_tests/conftest.py +++ b/sdk/ai/azure-ai-client/generated_tests/conftest.py @@ -21,14 +21,14 @@ # For security, please avoid record sensitive identity information in recordings @pytest.fixture(scope="session", autouse=True) def add_sanitizers(test_proxy): - _subscription_id = os.environ.get("_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - _tenant_id = os.environ.get("_TENANT_ID", "00000000-0000-0000-0000-000000000000") - _client_id = os.environ.get("_CLIENT_ID", "00000000-0000-0000-0000-000000000000") - _client_secret = os.environ.get("_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=_subscription_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=_tenant_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=_client_id, value="00000000-0000-0000-0000-000000000000") - add_general_regex_sanitizer(regex=_client_secret, value="00000000-0000-0000-0000-000000000000") + azureai_subscription_id = os.environ.get("AZUREAI_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + azureai_tenant_id = os.environ.get("AZUREAI_TENANT_ID", 
"00000000-0000-0000-0000-000000000000") + azureai_client_id = os.environ.get("AZUREAI_CLIENT_ID", "00000000-0000-0000-0000-000000000000") + azureai_client_secret = os.environ.get("AZUREAI_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=azureai_subscription_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=azureai_tenant_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=azureai_client_id, value="00000000-0000-0000-0000-000000000000") + add_general_regex_sanitizer(regex=azureai_client_secret, value="00000000-0000-0000-0000-000000000000") add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]") add_header_regex_sanitizer(key="Cookie", value="cookie;") diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py new file mode 100644 index 000000000000..a698c1815fd3 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import AzureAIClientTestBase, AzureAIPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestAzureAIEvaluationsOperations(AzureAIClientTestBase): + @AzureAIPreparer() + @recorded_by_proxy + def test_evaluations_create(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.evaluations.create( + evaluation={ + "data": "input_data", + "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, + "description": "str", + "displayName": "str", + "id": "str", + "properties": {"str": "str"}, + "status": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + }, + "tags": {"str": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_evaluations_list(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.evaluations.list() + result = [r for r in response] + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_evaluations_update(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.evaluations.update( + id="str", + update_request={"description": "str", "displayName": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_evaluations_get(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.evaluations.get( + id="str", + ) + + # please add some check logic here by yourself + # ... 
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py new file mode 100644 index 000000000000..7f9b753e530a --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import AzureAIPreparer +from testpreparer_async import AzureAIClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestAzureAIEvaluationsOperationsAsync(AzureAIClientTestBaseAsync): + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_evaluations_create(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.evaluations.create( + evaluation={ + "data": "input_data", + "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, + "description": "str", + "displayName": "str", + "id": "str", + "properties": {"str": "str"}, + "status": "str", + "systemData": { + "createdAt": "2020-02-20 00:00:00", + "createdBy": "str", + "createdByType": "str", + "lastModifiedAt": "2020-02-20 00:00:00", + }, + "tags": {"str": "str"}, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_evaluations_list(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = client.evaluations.list() + result = [r async for r in response] + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_evaluations_update(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.evaluations.update( + id="str", + update_request={"description": "str", "displayName": "str", "tags": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_evaluations_get(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.evaluations.get( + id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer.py index 07f80c19c778..7230206c1f80 100644 --- a/sdk/ai/azure-ai-client/generated_tests/testpreparer.py +++ b/sdk/ai/azure-ai-client/generated_tests/testpreparer.py @@ -5,20 +5,20 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from azure.ai.client import Client +from azure.ai.client import AzureAIClient from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer import functools -class ClientTestBase(AzureRecordedTestCase): +class AzureAIClientTestBase(AzureRecordedTestCase): def create_client(self, endpoint): - credential = self.get_credential(Client) + credential = self.get_credential(AzureAIClient) return self.create_client_from_credential( - Client, + AzureAIClient, credential=credential, endpoint=endpoint, ) -Preparer = functools.partial(PowerShellPreparer, "", _endpoint="https://fake__endpoint.com") +AzureAIPreparer = functools.partial(PowerShellPreparer, "azureai", azureai_endpoint="https://fake_azureai_endpoint.com") diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py index 78bccaa8d731..85d0b79b37c3 100644 --- a/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py +++ b/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py @@ -5,16 +5,16 @@ # Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from azure.ai.client.aio import Client +from azure.ai.client.aio import AzureAIClient from devtools_testutils import AzureRecordedTestCase -class ClientTestBaseAsync(AzureRecordedTestCase): +class AzureAIClientTestBaseAsync(AzureRecordedTestCase): def create_async_client(self, endpoint): - credential = self.get_credential(Client, is_async=True) + credential = self.get_credential(AzureAIClient, is_async=True) return self.create_client_from_credential( - Client, + AzureAIClient, credential=credential, endpoint=endpoint, ) diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index 270230ca0344..dc9e0da44593 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,4 +1,3 @@ - # These are needed for SDK logging. You can ignore them. import sys import logging @@ -31,7 +30,7 @@ subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # You can get an authenticated azure.ai.inference client directly, if you have a serverless endpoint in your project: @@ -62,8 +61,8 @@ # You can list all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True # Optional. Defaults to "False" + endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True, # Optional. 
Defaults to "False" ) print("====> Listing all Azure Open AI endpoints:") for endpoint in endpoints: @@ -72,16 +71,14 @@ # You can get the default endpoint of a particular "type" (note that since at the moment the service # does not have a notion of a default endpoint, this will return the first endpoint of that type): endpoint = ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Required. - populate_secrets=True # Optional. Defaults to "False" + endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" ) print("====> Get default Azure Open AI endpoint:") print(endpoint) # You can get an endpoint by its name: endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], # Required. - populate_secrets=True + endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. ) print("====> Print properties of a particular endpoint:") print(endpoint) @@ -94,13 +91,15 @@ client = AzureOpenAI( api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? + api_version="2024-08-01-preview", # TODO: Is this needed? ) elif endpoint.authentication_type == AuthenticationType.AAD: print("====> Creating AzureOpenAI client using Entra ID authentication") client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) @@ -108,7 +107,9 @@ # TODO - Not yet supported by the service. Expected 9/27. print("====> Creating AzureOpenAI client using SAS authentication") client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) @@ -136,22 +137,12 @@ elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.properties.token_credential - ) + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) elif endpoint.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. 
print("====> Creating ChatCompletionsClient using SAS authentication") - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.token_credential - ) + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) - response = client.complete( - messages=[ - UserMessage(content="How many feet are in a mile?") - ] - ) + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - print(response.choices[0].message.content) \ No newline at end of file + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py new file mode 100644 index 000000000000..d9199dc6c379 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py @@ -0,0 +1,95 @@ +import os +from pprint import pprint + +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential + +from azure.ai.client.models import Evaluation, Dataset, EvaluatorConfiguration + + +# Project Configuration Canary +Subscription = "2d385bf4-0756-4a76-aa95-28bf9ed3b625" +ResourceGroup = "rg-anksingai" +Workspace = "anksing-canary" +DataUri = "azureml://locations/eastus2euap/workspaces/a51c1ea7-5c29-4c32-a98e-7fa752f36e7c/data/test-remote-eval-data/versions/1" +Endpoint = "https://eastus2euap.api.azureml.ms" + +# Create an Azure AI client +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) + +# Create an evaluation +evaluation = Evaluation( + display_name="Remote Evaluation", + description="Evaluation of dataset", + data=Dataset(id=DataUri), + evaluators={ + "f1_score": EvaluatorConfiguration( + id="azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1" + ), + "relevance": EvaluatorConfiguration( + id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2", + init_params={ + "model_config": { + "api_key": "/subscriptions/2d385bf4-0756-4a76-aa95-28bf9ed3b625/resourceGroups/rg-anksingai/providers/Microsoft.MachineLearningServices/workspaces/anksing-canary/connections/ai-anksingai0771286510468288/credentials/key", + "azure_deployment": "gpt-4", + "api_version": "2023-07-01-preview", + "azure_endpoint": "https://ai-anksingai0771286510468288.openai.azure.com/", + } + }, + ), + }, + # This is needed as a workaround until environment gets published to registry + properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"}, +) + +# Create evaluation +evaluation_response = ai_client.evaluations.create( + evaluation=evaluation, +) + +pprint(evaluation_response.as_dict()) + +# Get evaluation +get_evaluation_response = ai_client.evaluations.get(evaluation_response.id) + +pprint(get_evaluation_response.as_dict()) + + +evaluation_json = { + "Data": {"Uri": DataUri}, + "DisplayName": "Remote Evaluation", + "Description": "Testing", + # "Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/2", + "Evaluators": { + "f1_score": {"Id": "azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1"}, + "relevance": { + "Id": 
"azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/1", + "initParams": { + "model_config": { + "api_key": "/subscriptions/2d385bf4-0756-4a76-aa95-28bf9ed3b625/resourceGroups/rg-anksingai/providers/Microsoft.MachineLearningServices/workspaces/anksing-canary/connections/ai-anksingaicanary931822963616_aoai/credentials/key", + "azure_deployment": "gpt-4", + "api_version": "2023-07-01-preview", + "azure_endpoint": "https://ai-anksingaicanary931822963616.openai.azure.com/", + } + }, + }, + "hate_unfairness": { + "Id": "azureml://registries/jamahaja-evals-registry/models/HateUnfairnessEvaluator/versions/2", + "initParams": { + "azure_ai_project": { + "subscription_id": "2d385bf4-0756-4a76-aa95-28bf9ed3b625", + "resource_group_name": "rg-anksingai", + "workspace_name": "anksing-canary", + } + }, + }, + }, + "properties": { + "Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6", + # "_azureml.evaluation_run": "promptflow.BatchRun" + }, +} diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 352e32d6e00e..a941b4597271 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: e64174ba13dd23ee492bea01c6a6d61871be63e6 +commit: c989add6689ea68698d8ec219203ab19269363d6 repo: Azure/azure-rest-api-specs additionalDirectories: From 6b53fc60bfbd0758190e56949907bc2a9111f826 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:14:06 -0700 Subject: [PATCH 012/138] Add .inference.get_embeddings_client() method --- .../azure/ai/client/models/__init__.py | 6 +++ .../azure/ai/client/operations/_patch.py | 38 +++++++++++++++++++ .../samples/endpoints/sample_endpoints.py | 20 +++++++++- 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index fecabd03f9ef..8bb16afadb31 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -16,6 +16,9 @@ from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk +from ._models import CredentialsSASAuth +from ._enums import AuthenticationType +from ._enums import EndpointType __all__ = [ "AppInsightsConfiguration", @@ -25,6 +28,9 @@ "InputData", "SystemData", "UpdateEvaluationRequest", + "EndpointType", + "AuthenticationType", + "CredentialsSASAuth", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index ae8654c7bd9e..003401135bd9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -63,6 +63,44 @@ def get_chat_completions_client(self) -> "ChatComletionsClient": return client + def get_embeddings_client(self) -> "EmbeddingsClient": + endpoint = self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.SERVERLESS, + populate_secrets=True + ) + if not endpoint: + raise ValueError("No serverless endpoint found") + + try: + from azure.ai.inference import EmbeddingsClient + except ModuleNotFoundError as _: + raise 
ModuleNotFoundError("Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'") + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication") + from azure.core.credentials import AzureKeyCredential + client = EmbeddingsClient( + endpoint=endpoint.endpoint_url, + credential=AzureKeyCredential(endpoint.key) + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication") + client = EmbeddingsClient( + endpoint=endpoint.endpoint_url, + credential=endpoint.properties.token_credential + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication") + client = EmbeddingsClient( + endpoint=endpoint.endpoint_url, + credential=endpoint.token_credential + ) + else: + raise ValueError("Unknown authentication type") + + return client def get_azure_openai_client(self) -> "AzureOpenAI": endpoint = self.outer_instance.endpoints.get_default( diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index dc9e0da44593..cd94e5b58f27 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -33,7 +33,7 @@ logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) -# You can get an authenticated azure.ai.inference client directly, if you have a serverless endpoint in your project: +# You can get an authenticated azure.ai.inference chat completions client directly, if you have a serverless endpoint in your project: client = ai_client.inference.get_chat_completions_client() response = client.complete( @@ -44,6 +44,24 @@ print(response.choices[0].message.content) +# You can get an authenticated azure.ai.inference embeddings client directly, if you have a serverless endpoint in your project: +client = ai_client.inference.get_embeddings_client() + +response = client.embed( + input=[ + "first phrase", + "second phrase", + "third phrase" + ] +) + +for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + # You can get an authenticated AzureOpenAI client directly, if you have an Azure OpenAI endpoint in your project: client = ai_client.inference.get_azure_openai_client() From 3043ab1914ae896f22c7dcf0852b3cf8ab9aef56 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Mon, 7 Oct 2024 10:47:56 -0800 Subject: [PATCH 013/138] Jhakulin/azure ai client agents (#37748) * generated agents * merge latest * update * add patches * update * new version generated * fixes * fix merge error * update * use assistant_id for now * update --- .../azure/ai/client/_client.py | 6 +- .../azure-ai-client/azure/ai/client/_patch.py | 2 +- .../azure-ai-client/azure/ai/client/_types.py | 20 + .../azure/ai/client/_vendor.py | 50 + .../azure/ai/client/aio/_client.py | 6 +- .../ai/client/aio/operations/__init__.py | 4 +- .../ai/client/aio/operations/_operations.py | 4915 ++++++++++++- 
.../azure/ai/client/models/__init__.py | 299 +- .../azure/ai/client/models/_enums.py | 449 ++ .../azure/ai/client/models/_models.py | 5028 +++++++++++++- .../azure/ai/client/models/_patch.py | 711 +- .../azure/ai/client/operations/__init__.py | 4 +- .../azure/ai/client/operations/_operations.py | 6160 ++++++++++++++++- .../azure/ai/client/operations/_patch.py | 1007 ++- .../generated_tests/test_agents_operations.py | 606 ++ .../test_agents_operations_async.py | 607 ++ .../test_azure_ai_agents_operations.py | 606 ++ .../test_azure_ai_agents_operations_async.py | 607 ++ .../samples/agents/sample_agents_basics.py | 61 + .../samples/endpoints/sample_endpoints.py | 22 +- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 21 files changed, 20926 insertions(+), 246 deletions(-) create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_types.py create mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_vendor.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py create mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index 4bec476e91db..cb5b3efc371a 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -26,10 +26,10 @@ class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword """AzureAIClient. - :ivar endpoints: EndpointsOperations operations - :vartype endpoints: azure.ai.client.operations.EndpointsOperations :ivar agents: AgentsOperations operations :vartype agents: azure.ai.client.operations.AgentsOperations + :ivar endpoints: EndpointsOperations operations + :vartype endpoints: azure.ai.client.operations.EndpointsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -92,8 +92,8 @@ def __init__( self._serialize = Serializer() self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index e6c73772dbc9..d05ae45e0670 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -86,7 +86,7 @@ def __init__( self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations - _endpoint2 = f"{endpoint}/assistants/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: 
disable=line-too-long + _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long self._config2 = ClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_types.py b/sdk/ai/azure-ai-client/azure/ai/client/_types.py new file mode 100644 index 000000000000..4c06fd33de50 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_types.py @@ -0,0 +1,20 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING, Union + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from . import models as _models + from .. import models as _models +AgentsApiResponseFormatOption = Union[ + str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" +] +MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] +AgentsApiToolChoiceOption = Union[str, str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py b/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py new file mode 100644 index 000000000000..e6f010934827 --- /dev/null +++ b/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py @@ -0,0 +1,50 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +import json +from typing import Any, Dict, IO, List, Mapping, Optional, Tuple, Union + +from ._model_base import Model, SdkJSONEncoder + + +# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` +FileContent = Union[str, bytes, IO[str], IO[bytes]] + +FileType = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], +] + + +def serialize_multipart_data_entry(data_entry: Any) -> Any: + if isinstance(data_entry, (list, tuple, dict, Model)): + return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) + return data_entry + + +def prepare_multipart_form_data( + body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] +) -> Tuple[List[FileType], Dict[str, Any]]: + files: List[FileType] = [] + data: Dict[str, Any] = {} + for multipart_field in multipart_fields: + multipart_entry = body.get(multipart_field) + if isinstance(multipart_entry, list): + files.extend([(multipart_field, e) for e in multipart_entry]) + elif multipart_entry: + files.append((multipart_field, multipart_entry)) + + for data_field in data_fields: + data_entry = body.get(data_field) + if data_entry: + data[data_field] = serialize_multipart_data_entry(data_entry) + + return files, data diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 7eea6e0e2690..5c9645c1f9b9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -26,10 +26,10 @@ class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword """AzureAIClient. - :ivar endpoints: EndpointsOperations operations - :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations :ivar agents: AgentsOperations operations :vartype agents: azure.ai.client.aio.operations.AgentsOperations + :ivar endpoints: EndpointsOperations operations + :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -92,8 +92,8 @@ def __init__( self._serialize = Serializer() self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py index 4c4fe956f18f..8e6a46afb1f5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py @@ -6,8 +6,8 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._operations import EndpointsOperations from ._operations import AgentsOperations +from ._operations import EndpointsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -15,8 +15,8 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "EndpointsOperations", "AgentsOperations", + "EndpointsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index b5331cd78933..9d8926ce14a4 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, Type, TypeVar, Union, overload +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, Type, TypeVar, Union, overload import urllib.parse from azure.core.async_paging import AsyncItemPaged, AsyncList @@ -29,9 +29,50 @@ from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.utils import case_insensitive_dict -from ... import models as _models +from ... import _model_base, models as _models from ..._model_base import SdkJSONEncoder, _deserialize +from ..._vendor import FileType, prepare_multipart_form_data from ...operations._operations import ( + build_agents_cancel_run_request, + build_agents_cancel_vector_store_file_batch_request, + build_agents_create_agent_request, + build_agents_create_message_request, + build_agents_create_run_request, + build_agents_create_thread_and_run_request, + build_agents_create_thread_request, + build_agents_create_vector_store_file_batch_request, + build_agents_create_vector_store_file_request, + build_agents_create_vector_store_request, + build_agents_delete_agent_request, + build_agents_delete_file_request, + build_agents_delete_thread_request, + build_agents_delete_vector_store_file_request, + build_agents_delete_vector_store_request, + build_agents_get_agent_request, + build_agents_get_file_content_request, + build_agents_get_file_request, + build_agents_get_message_request, + build_agents_get_run_request, + build_agents_get_run_step_request, + build_agents_get_thread_request, + build_agents_get_vector_store_file_batch_request, + build_agents_get_vector_store_file_request, + build_agents_get_vector_store_request, + build_agents_list_agents_request, + build_agents_list_files_request, + build_agents_list_messages_request, + build_agents_list_run_steps_request, + build_agents_list_runs_request, + build_agents_list_vector_store_file_batch_files_request, + build_agents_list_vector_store_files_request, + build_agents_list_vector_stores_request, + build_agents_modify_vector_store_request, + build_agents_submit_tool_outputs_to_run_request, + build_agents_update_agent_request, + build_agents_update_message_request, + build_agents_update_run_request, + build_agents_update_thread_request, + build_agents_upload_file_request, build_endpoints_list_request, build_endpoints_list_secrets_request, build_evaluations_create_request, @@ -44,12 +85,4864 @@ from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: 
disable=unused-import,ungrouped-imports + from ... import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.aio.AzureAIClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. 
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs 
+ ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. 
The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_agents_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. 
+ :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions]
+ :keyword tool_resources: A set of resources that are made available to the agent's tools in
+ this thread. The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.client.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_thread(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AgentThread:
+ """Creates a new thread. Threads contain messages and can be run by agents.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_thread(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ messages: Optional[List[_models.ThreadMessageOptions]] = None,
+ tool_resources: Optional[_models.ToolResources] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.AgentThread:
+ """Creates a new thread. Threads contain messages and can be run by agents.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword messages: The initial messages to associate with the new thread. Default value is
+ None.
+ :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions]
+ :keyword tool_resources: A set of resources that are made available to the agent's tools in
+ this thread. The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.client.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread.
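+
+ For example, a minimal sketch (illustrative only; assumes an authenticated async
+ client named ``client`` that exposes this operation group as ``agents``):
+
+ .. code-block:: python
+
+     thread = await client.agents.create_thread(metadata={"session": "demo"})
+     print(thread.id)
+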
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AgentThread. 
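+
+ For example, a minimal sketch (illustrative only; assumes an authenticated async
+ client named ``client`` that exposes this operation group as ``agents``, and a
+ placeholder thread ID):
+
+ .. code-block:: python
+
+     thread = await client.agents.get_thread("thread_abc123")
+     print(thread.id)
+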
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + _request = build_agents_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.client.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_thread(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AgentThread:
+ """Modifies an existing thread.
+
+ :param thread_id: The ID of the thread to modify. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_thread(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_resources: Optional[_models.ToolResources] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.AgentThread:
+ """Modifies an existing thread.
+
+ :param thread_id: The ID of the thread to modify. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_resources: A set of resources that are made available to the agent's tools in
+ this thread. The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.client.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread.
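+
+ For example, a minimal sketch (illustrative only; assumes an authenticated async
+ client named ``client`` that exposes this operation group as ``agents``, and a
+ placeholder thread ID):
+
+ .. code-block:: python
+
+     thread = await client.agents.update_thread(
+         "thread_abc123", metadata={"stage": "triage"}
+     )
+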
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
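+
+ For example, a minimal sketch (illustrative only; same ``client`` and placeholder
+ ID assumptions as above):
+
+ .. code-block:: python
+
+     status = await client.agents.delete_thread("thread_abc123")
+     print(status.deleted)
+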
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. 
+ * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert
+ messages from the agent into
+ the conversation. Known values are: "user" and "assistant". Required.
+ :paramtype role: str or ~azure.ai.client.models.MessageRole
+ :keyword content: The textual content of the initial message. Currently, robust input including
+ images and annotated text may only be provided via
+ a separate call to the create message API. Required.
+ :paramtype content: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword attachments: A list of files attached to the message, and the tools they should be
+ added to. Default value is None.
+ :paramtype attachments: list[~azure.ai.client.models.MessageAttachment]
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_message(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Creates a new message on a specified thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_message(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ role: Union[str, _models.MessageRole] = _Unset,
+ content: str = _Unset,
+ attachments: Optional[List[_models.MessageAttachment]] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Creates a new message on a specified thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword role: The role of the entity that is creating the message. Allowed values include:
+
+
+ * ``user``\\ : Indicates the message is sent by an actual user and should be used in most
+ cases to represent user-generated messages.
+ * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert
+ messages from the agent into
+ the conversation. Known values are: "user" and "assistant". Required.
+ :paramtype role: str or ~azure.ai.client.models.MessageRole
+ :keyword content: The textual content of the initial message. Currently, robust input including
+ images and annotated text may only be provided via
+ a separate call to the create message API. Required.
+ :paramtype content: str
+ :keyword attachments: A list of files attached to the message, and the tools they should be
+ added to. Default value is None.
+ :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any 
+ ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_message(
+ self,
+ thread_id: str,
+ message_id: str,
+ *,
+ content_type: str = "application/json",
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_message(
+ self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_message(
+ self,
+ thread_id: str,
+ message_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage.
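+
+ For example, a minimal sketch (illustrative only; assumes an authenticated async
+ client named ``client`` that exposes this operation group as ``agents``, and
+ placeholder IDs):
+
+ .. code-block:: python
+
+     message = await client.agents.update_message(
+         "thread_abc123", "msg_abc123", metadata={"reviewed": "yes"}
+     )
+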
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.client.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+ or ~azure.ai.client.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_run(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an agent thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_run(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ stream_parameter: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an agent thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str
+ :keyword model: The overridden model name that the agent should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the agent should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str
+ :keyword additional_messages: Adds additional messages to the thread before creating the run.
+ Default value is None.
+ :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
+ :keyword tools: The overridden list of enabled tools that the agent should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
+ :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the
+ Run as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run.
If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.client.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+ or ~azure.ai.client.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun.
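+
+ For example, a minimal sketch (illustrative only; assumes an authenticated async
+ client named ``client`` that exposes this operation group as ``agents``, and
+ placeholder IDs):
+
+ .. code-block:: python
+
+     run = await client.agents.create_run("thread_abc123", assistant_id="asst_abc123")
+     print(run.id, run.status)
+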
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. 
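+
+ A minimal usage sketch (illustrative only; assumes an authenticated async client
+ named ``client`` that exposes this operation group as ``agents``, and that the
+ pageable list exposes its items as ``data``):
+
+ .. code-block:: python
+
+     runs = await client.agents.list_runs("thread_abc123", limit=10, order="desc")
+     for run in runs.data:
+         print(run.id, run.status)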
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run(self, thread_id: str, 
run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. 
+ :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
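+
+ Example (a minimal sketch, assuming ``project_client`` is an authenticated async
+ ``AzureAIClient`` and that ``thread_id`` and ``run_id`` refer to an existing run;
+ names are illustrative):
+
+ .. code-block:: python
+
+     # Attach structured metadata (up to 16 key/value pairs) to an existing run.
+     run = await project_client.agents.update_run(
+         thread_id=thread_id,
+         run_id=run_id,
+         metadata={"team": "search", "priority": "high"},
+     )
+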
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
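+
+ Example (a minimal polling sketch, assuming ``project_client`` is an authenticated
+ async ``AzureAIClient`` and ``execute_tool`` is a hypothetical local helper that runs
+ one function-tool call and returns a JSON-serializable result; names are illustrative):
+
+ .. code-block:: python
+
+     import asyncio
+     import json
+
+     run = await project_client.agents.get_run(thread_id=thread_id, run_id=run_id)
+     while run.status in ("queued", "in_progress", "requires_action"):
+         if run.status == "requires_action":
+             # The service lists the tool calls it is waiting on under required_action.
+             calls = run.required_action.submit_tool_outputs.tool_calls
+             outputs = [
+                 # execute_tool is a stand-in for application-specific dispatch.
+                 {"tool_call_id": call.id, "output": json.dumps(execute_tool(call))}
+                 for call in calls
+             ]
+             run = await project_client.agents.submit_tool_outputs_to_run(
+                 thread_id=thread_id, run_id=run_id, tool_outputs=outputs
+             )
+         else:
+             await asyncio.sleep(1)
+             run = await project_client.agents.get_run(thread_id=thread_id, run_id=run_id)
+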
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
+ :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
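+
+ Example (a minimal sketch; ``project_client``, ``thread_id`` and ``run_id`` are
+ assumed to exist as in the sketches above):
+
+ .. code-block:: python
+
+     # Request cancellation; the run typically passes through "cancelling" to "cancelled".
+     run = await project_client.agents.cancel_run(thread_id=thread_id, run_id=run_id)
+     print(run.status)
+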
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
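+
+ Example (a minimal sketch; ``project_client`` and ``assistant_id`` are assumed to
+ exist, and the inline dict is shorthand for ``AgentThreadCreationOptions``):
+
+ .. code-block:: python
+
+     # Create a thread seeded with one user message and start a run in a single call.
+     run = await project_client.agents.create_thread_and_run(
+         assistant_id=assistant_id,
+         thread={"messages": [{"role": "user", "content": "What is 2 + 2?"}]},
+         temperature=0.2,
+     )
+     print(run.thread_id, run.id, run.status)
+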
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. 
+ :type step_id: str + :return: RunStep. The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.client.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. 
before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_agents_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :return: FileListResponse. 
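+
+ Example (a minimal sketch; ``project_client`` is assumed as above):
+
+ .. code-block:: python
+
+     # List only the files that were uploaded for agent use.
+     files = await project_client.agents.list_files(purpose="assistants")
+     for f in files.data:
+         print(f.id, f.filename)
+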
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.client._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
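+
+ Example (a minimal sketch; ``project_client`` is assumed as above and
+ ``product_info.md`` is an illustrative local file):
+
+ .. code-block:: python
+
+     # Upload a local document so file-search style tools can reference it.
+     with open("product_info.md", "rb") as data:
+         uploaded = await project_client.agents.upload_file(
+             file=data, purpose="assistants", filename="product_info.md"
+         )
+     print(uploaded.id)
+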
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file( + self, + body: JSON = _Unset, + *, + file: FileType = _Unset, + purpose: Union[str, _models.FilePurpose] = _Unset, + filename: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Is one of the following types: JSON Required. + :type body: JSON + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.client._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file is _Unset: + raise TypeError("missing required argument: file") + if purpose is _Unset: + raise TypeError("missing required argument: purpose") + body = {"file": file, "filename": filename, "purpose": purpose} + body = {k: v for k, v in body.items() if v is not None} + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_agents_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_file_request(
+ file_id=file_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.OpenAIFile, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @distributed_trace_async
+ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
+ """Retrieves the raw content of a previously uploaded file.
+
+ :param file_id: The ID of the file to retrieve. Required.
+ :type file_id: str
+ :return: FileContentResponse.
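+
+ Example (a minimal sketch; ``project_client`` and an uploaded ``file_id`` are
+ assumed, as is a ``content`` bytes field on the response model):
+
+ .. code-block:: python
+
+     # Download the raw bytes of a previously uploaded file and save a local copy.
+     result = await project_client.agents.get_file_content(file_id=file_id)
+     with open("downloaded_copy.bin", "wb") as out:
+         out.write(result.content)
+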
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. 
Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
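
A hedged sketch of the keyword form of create_vector_store defined above; the file ID and metadata values are hypothetical.

from azure.ai.client.aio import AzureAIClient
from azure.ai.client.models import VectorStore


async def create_docs_store(client: AzureAIClient) -> VectorStore:
    # As in the implementation above, the JSON body is assembled from these
    # keywords and None-valued entries are dropped before serialization.
    return await client.agents.create_vector_store(
        file_ids=["assistant-file-123"],  # hypothetical ID from a prior file upload
        name="docs-store",
        metadata={"team": "search"},
    )
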
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_vector_store_request(
+ vector_store_id=vector_store_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.VectorStore, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ async def modify_vector_store(
+ self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def modify_vector_store(
+ self,
+ vector_store_id: str,
+ *,
+ content_type: str = "application/json",
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def modify_vector_store(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def modify_vector_store(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
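
A short sketch for modify_vector_store; the anchor and days fields on VectorStoreExpirationPolicy are assumptions based on the OpenAI schema this model mirrors.

from azure.ai.client.aio import AzureAIClient
from azure.ai.client.models import VectorStore, VectorStoreExpirationPolicy


async def rename_store(client: AzureAIClient, vector_store_id: str) -> VectorStore:
    return await client.agents.modify_vector_store(
        vector_store_id,
        name="docs-store-v2",
        # Assumed field names; expire seven days after last activity.
        expires_after=VectorStoreExpirationPolicy(anchor="last_active_at", days=7),
    )
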
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
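
The matching deletion sketch; the boolean deleted flag is an assumption from the VectorStoreDeletionStatus model name.

from azure.ai.client.aio import AzureAIClient


async def drop_store(client: AzureAIClient, vector_store_id: str) -> bool:
    status = await client.agents.delete_vector_store(vector_store_id)
    return status.deleted  # assumed flag on VectorStoreDeletionStatus
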
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
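
A sketch of status filtering with list_vector_store_files (defined above); the data attribute on the pageable model is assumed, as before.

from typing import List

from azure.ai.client.aio import AzureAIClient


async def completed_file_ids(client: AzureAIClient, vector_store_id: str) -> List[str]:
    page = await client.agents.list_vector_store_files(
        vector_store_id,
        filter="completed",  # one of the VectorStoreFileStatusFilter values
        limit=100,
    )
    return [f.id for f in page.data]
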
The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file(
+ self,
+ vector_store_id: str,
+ *,
+ file_id: str,
+ content_type: str = "application/json",
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword file_id: Identifier of the file. Required.
+ :paramtype file_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_file(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_id: str = _Unset,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_id: Identifier of the file. Required.
+ :paramtype file_id: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFile.
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
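
A sketch for create_vector_store_file; passing VectorStoreAutoChunkingStrategyRequest() explicitly is assumed to be equivalent to omitting chunking_strategy, per the docstring above.

from azure.ai.client.aio import AzureAIClient
from azure.ai.client.models import (
    VectorStoreAutoChunkingStrategyRequest,
    VectorStoreFile,
)


async def attach_file(client: AzureAIClient, vector_store_id: str, file_id: str) -> VectorStoreFile:
    return await client.agents.create_vector_store_file(
        vector_store_id,
        file_id=file_id,
        chunking_strategy=VectorStoreAutoChunkingStrategyRequest(),
    )
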
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. 
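
The detach sketch for delete_vector_store_file; as the docstring above notes, this removes the file from the store without deleting the underlying file. The deleted flag is assumed from the model name.

from azure.ai.client.aio import AzureAIClient


async def detach_file(client: AzureAIClient, vector_store_id: str, file_id: str) -> bool:
    status = await client.agents.delete_vector_store_file(vector_store_id, file_id)
    # The underlying file still exists; deleting it requires the separate
    # file-deletion operation, as documented above.
    return status.deleted  # assumed flag on VectorStoreFileDeletionStatus
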
Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_file_batch(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_file_batch(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: List[str] = _Unset,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: List of file identifiers. Required.
+ :paramtype file_ids: list[str]
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, the
+ auto strategy will be used. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFileBatch.
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
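
A sketch for create_vector_store_file_batch using the keyword form defined above; the id attribute on VectorStoreFileBatch is assumed.

from typing import List

from azure.ai.client.aio import AzureAIClient


async def add_files_in_batch(client: AzureAIClient, vector_store_id: str, file_ids: List[str]) -> str:
    batch = await client.agents.create_vector_store_file_batch(
        vector_store_id,
        file_ids=file_ids,
    )
    return batch.id  # use this ID with the get/cancel/list batch operations
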
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. 
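
A combined sketch for get_vector_store_file_batch and cancel_vector_store_file_batch; the status attribute and the "in_progress" literal are assumptions based on the VectorStoreFileBatchStatus enum.

from azure.ai.client.aio import AzureAIClient


async def cancel_if_running(client: AzureAIClient, vector_store_id: str, batch_id: str) -> str:
    batch = await client.agents.get_vector_store_file_batch(vector_store_id, batch_id)
    if batch.status == "in_progress":  # assumed status literal
        batch = await client.agents.cancel_vector_store_file_batch(vector_store_id, batch_id)
    return batch.status
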
after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + class EndpointsOperations: """ .. warning:: @@ -279,24 +5172,6 @@ async def _list_secrets( # pylint: disable=protected-access return deserialized # type: ignore -class AgentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.aio.AzureAIClient`'s - :attr:`agents` attribute. 
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - class EvaluationsOperations: """ .. warning:: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index 8bb16afadb31..9377c96de006 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -6,13 +6,160 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from ._models import Agent +from ._models import AgentDeletionStatus +from ._models import AgentThread +from ._models import AgentThreadCreationOptions +from ._models import AgentsApiResponseFormat +from ._models import AgentsNamedToolChoice from ._models import AppInsightsConfiguration +from ._models import CodeInterpreterToolDefinition +from ._models import CodeInterpreterToolResource from ._models import Dataset from ._models import Evaluation from ._models import EvaluatorConfiguration +from ._models import FileContentResponse +from ._models import FileDeletionStatus +from ._models import FileListResponse +from ._models import FileSearchToolDefinition +from ._models import FileSearchToolDefinitionDetails +from ._models import FileSearchToolResource +from ._models import FunctionDefinition +from ._models import FunctionName +from ._models import FunctionToolDefinition from ._models import InputData +from ._models import MessageAttachment +from ._models import MessageContent +from ._models import MessageDelta +from ._models import MessageDeltaChunk +from ._models import MessageDeltaContent +from ._models import MessageDeltaImageFileContent +from ._models import MessageDeltaImageFileContentObject +from ._models import MessageDeltaTextAnnotation +from ._models import MessageDeltaTextContent +from ._models import MessageDeltaTextContentObject +from ._models import MessageDeltaTextFileCitationAnnotation +from ._models import MessageDeltaTextFileCitationAnnotationObject +from ._models import MessageDeltaTextFilePathAnnotation +from ._models import MessageDeltaTextFilePathAnnotationObject +from ._models import MessageImageFileContent +from ._models import MessageImageFileDetails +from ._models import MessageIncompleteDetails +from ._models import MessageTextAnnotation +from ._models import MessageTextContent +from ._models import MessageTextDetails +from ._models import MessageTextFileCitationAnnotation +from ._models import MessageTextFileCitationDetails +from ._models import MessageTextFilePathAnnotation +from ._models import MessageTextFilePathDetails +from ._models import OpenAIFile +from ._models import OpenAIPageableListOfAgent +from ._models import OpenAIPageableListOfRunStep +from ._models import OpenAIPageableListOfThreadMessage +from ._models import OpenAIPageableListOfThreadRun +from ._models import OpenAIPageableListOfVectorStore +from ._models import OpenAIPageableListOfVectorStoreFile +from ._models import RequiredAction +from ._models import RequiredFunctionToolCall +from ._models import RequiredFunctionToolCallDetails +from ._models import RequiredToolCall +from 
._models import RunCompletionUsage +from ._models import RunError +from ._models import RunStep +from ._models import RunStepCodeInterpreterImageOutput +from ._models import RunStepCodeInterpreterImageReference +from ._models import RunStepCodeInterpreterLogOutput +from ._models import RunStepCodeInterpreterToolCall +from ._models import RunStepCodeInterpreterToolCallDetails +from ._models import RunStepCodeInterpreterToolCallOutput +from ._models import RunStepCompletionUsage +from ._models import RunStepDelta +from ._models import RunStepDeltaChunk +from ._models import RunStepDeltaCodeInterpreterDetailItemObject +from ._models import RunStepDeltaCodeInterpreterImageOutput +from ._models import RunStepDeltaCodeInterpreterImageOutputObject +from ._models import RunStepDeltaCodeInterpreterLogOutput +from ._models import RunStepDeltaCodeInterpreterOutput +from ._models import RunStepDeltaCodeInterpreterToolCall +from ._models import RunStepDeltaDetail +from ._models import RunStepDeltaFileSearchToolCall +from ._models import RunStepDeltaFunction +from ._models import RunStepDeltaFunctionToolCall +from ._models import RunStepDeltaMessageCreation +from ._models import RunStepDeltaMessageCreationObject +from ._models import RunStepDeltaToolCall +from ._models import RunStepDeltaToolCallObject +from ._models import RunStepDetails +from ._models import RunStepError +from ._models import RunStepFileSearchToolCall +from ._models import RunStepFunctionToolCall +from ._models import RunStepFunctionToolCallDetails +from ._models import RunStepMessageCreationDetails +from ._models import RunStepMessageCreationReference +from ._models import RunStepToolCall +from ._models import RunStepToolCallDetails +from ._models import SubmitToolOutputsAction +from ._models import SubmitToolOutputsDetails from ._models import SystemData +from ._models import ThreadDeletionStatus +from ._models import ThreadMessage +from ._models import ThreadMessageOptions +from ._models import ThreadRun +from ._models import ToolDefinition +from ._models import ToolOutput +from ._models import ToolResources +from ._models import TruncationObject +from ._models import UpdateCodeInterpreterToolResourceOptions from ._models import UpdateEvaluationRequest +from ._models import UpdateFileSearchToolResourceOptions +from ._models import UpdateToolResourcesOptions +from ._models import VectorStore +from ._models import VectorStoreAutoChunkingStrategyRequest +from ._models import VectorStoreAutoChunkingStrategyResponse +from ._models import VectorStoreChunkingStrategyRequest +from ._models import VectorStoreChunkingStrategyResponse +from ._models import VectorStoreDeletionStatus +from ._models import VectorStoreExpirationPolicy +from ._models import VectorStoreFile +from ._models import VectorStoreFileBatch +from ._models import VectorStoreFileCount +from ._models import VectorStoreFileDeletionStatus +from ._models import VectorStoreFileError +from ._models import VectorStoreStaticChunkingStrategyOptions +from ._models import VectorStoreStaticChunkingStrategyRequest +from ._models import VectorStoreStaticChunkingStrategyResponse + +from ._enums import AgentStreamEvent +from ._enums import AgentsApiResponseFormatMode +from ._enums import AgentsApiToolChoiceOptionMode +from ._enums import AgentsNamedToolChoiceType +from ._enums import ApiResponseFormat +from ._enums import DoneEvent +from ._enums import ErrorEvent +from ._enums import FilePurpose +from ._enums import FileState +from ._enums import IncompleteRunDetails +from ._enums import 
ListSortOrder +from ._enums import MessageIncompleteDetailsReason +from ._enums import MessageRole +from ._enums import MessageStatus +from ._enums import MessageStreamEvent +from ._enums import RunStatus +from ._enums import RunStepErrorCode +from ._enums import RunStepStatus +from ._enums import RunStepStreamEvent +from ._enums import RunStepType +from ._enums import RunStreamEvent +from ._enums import ThreadStreamEvent +from ._enums import TruncationStrategy +from ._enums import VectorStoreChunkingStrategyRequestType +from ._enums import VectorStoreChunkingStrategyResponseType +from ._enums import VectorStoreExpirationPolicyAnchor +from ._enums import VectorStoreFileBatchStatus +from ._enums import VectorStoreFileErrorCode +from ._enums import VectorStoreFileStatus +from ._enums import VectorStoreFileStatusFilter +from ._enums import VectorStoreStatus from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk @@ -21,16 +168,162 @@ from ._enums import EndpointType __all__ = [ + "Agent", + "AgentDeletionStatus", + "AgentThread", + "AgentThreadCreationOptions", + "AgentsApiResponseFormat", + "AgentsNamedToolChoice", "AppInsightsConfiguration", + "AuthenticationType", + "CodeInterpreterToolDefinition", + "CodeInterpreterToolResource", + "CredentialsSASAuth", "Dataset", + "EndpointType", "Evaluation", "EvaluatorConfiguration", + "FileContentResponse", + "FileDeletionStatus", + "FileListResponse", + "FileSearchToolDefinition", + "FileSearchToolDefinitionDetails", + "FileSearchToolResource", + "FunctionDefinition", + "FunctionName", + "FunctionToolDefinition", "InputData", + "MessageAttachment", + "MessageContent", + "MessageDelta", + "MessageDeltaChunk", + "MessageDeltaContent", + "MessageDeltaImageFileContent", + "MessageDeltaImageFileContentObject", + "MessageDeltaTextAnnotation", + "MessageDeltaTextContent", + "MessageDeltaTextContentObject", + "MessageDeltaTextFileCitationAnnotation", + "MessageDeltaTextFileCitationAnnotationObject", + "MessageDeltaTextFilePathAnnotation", + "MessageDeltaTextFilePathAnnotationObject", + "MessageImageFileContent", + "MessageImageFileDetails", + "MessageIncompleteDetails", + "MessageTextAnnotation", + "MessageTextContent", + "MessageTextDetails", + "MessageTextFileCitationAnnotation", + "MessageTextFileCitationDetails", + "MessageTextFilePathAnnotation", + "MessageTextFilePathDetails", + "OpenAIFile", + "OpenAIPageableListOfAgent", + "OpenAIPageableListOfRunStep", + "OpenAIPageableListOfThreadMessage", + "OpenAIPageableListOfThreadRun", + "OpenAIPageableListOfVectorStore", + "OpenAIPageableListOfVectorStoreFile", + "RequiredAction", + "RequiredFunctionToolCall", + "RequiredFunctionToolCallDetails", + "RequiredToolCall", + "RunCompletionUsage", + "RunError", + "RunStep", + "RunStepCodeInterpreterImageOutput", + "RunStepCodeInterpreterImageReference", + "RunStepCodeInterpreterLogOutput", + "RunStepCodeInterpreterToolCall", + "RunStepCodeInterpreterToolCallDetails", + "RunStepCodeInterpreterToolCallOutput", + "RunStepCompletionUsage", + "RunStepDelta", + "RunStepDeltaChunk", + "RunStepDeltaCodeInterpreterDetailItemObject", + "RunStepDeltaCodeInterpreterImageOutput", + "RunStepDeltaCodeInterpreterImageOutputObject", + "RunStepDeltaCodeInterpreterLogOutput", + "RunStepDeltaCodeInterpreterOutput", + "RunStepDeltaCodeInterpreterToolCall", + "RunStepDeltaDetail", + "RunStepDeltaFileSearchToolCall", + "RunStepDeltaFunction", + "RunStepDeltaFunctionToolCall", + 
"RunStepDeltaMessageCreation", + "RunStepDeltaMessageCreationObject", + "RunStepDeltaToolCall", + "RunStepDeltaToolCallObject", + "RunStepDetails", + "RunStepError", + "RunStepFileSearchToolCall", + "RunStepFunctionToolCall", + "RunStepFunctionToolCallDetails", + "RunStepMessageCreationDetails", + "RunStepMessageCreationReference", + "RunStepToolCall", + "RunStepToolCallDetails", + "SubmitToolOutputsAction", + "SubmitToolOutputsDetails", "SystemData", + "ThreadDeletionStatus", + "ThreadMessage", + "ThreadMessageOptions", + "ThreadRun", + "ToolDefinition", + "ToolOutput", + "ToolResources", + "TruncationObject", + "UpdateCodeInterpreterToolResourceOptions", "UpdateEvaluationRequest", - "EndpointType", - "AuthenticationType", - "CredentialsSASAuth", + "UpdateFileSearchToolResourceOptions", + "UpdateToolResourcesOptions", + "VectorStore", + "VectorStoreAutoChunkingStrategyRequest", + "VectorStoreAutoChunkingStrategyResponse", + "VectorStoreChunkingStrategyRequest", + "VectorStoreChunkingStrategyResponse", + "VectorStoreDeletionStatus", + "VectorStoreExpirationPolicy", + "VectorStoreFile", + "VectorStoreFileBatch", + "VectorStoreFileCount", + "VectorStoreFileDeletionStatus", + "VectorStoreFileError", + "VectorStoreStaticChunkingStrategyOptions", + "VectorStoreStaticChunkingStrategyRequest", + "VectorStoreStaticChunkingStrategyResponse", + "AgentStreamEvent", + "AgentsApiResponseFormatMode", + "AgentsApiToolChoiceOptionMode", + "AgentsNamedToolChoiceType", + "ApiResponseFormat", + "DoneEvent", + "ErrorEvent", + "FilePurpose", + "FileState", + "IncompleteRunDetails", + "ListSortOrder", + "MessageIncompleteDetailsReason", + "MessageRole", + "MessageStatus", + "MessageStreamEvent", + "RunStatus", + "RunStepErrorCode", + "RunStepStatus", + "RunStepStreamEvent", + "RunStepType", + "RunStreamEvent", + "ThreadStreamEvent", + "TruncationStrategy", + "VectorStoreChunkingStrategyRequestType", + "VectorStoreChunkingStrategyResponseType", + "VectorStoreExpirationPolicyAnchor", + "VectorStoreFileBatchStatus", + "VectorStoreFileErrorCode", + "VectorStoreFileStatus", + "VectorStoreFileStatusFilter", + "VectorStoreStatus", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index d8d49016bbef..76f30fcf6f85 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -10,6 +10,120 @@ from azure.core import CaseInsensitiveEnumMeta +class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents the mode in which the model will handle the return format of a tool call.""" + + AUTO = "auto" + """Default value. 
Let the model handle the return format."""
+    NONE = "none"
+    """Setting the value to ``none`` will result in a 400 Bad Request."""
+
+
+class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Specifies how the tool choice will be used."""
+
+    NONE = "none"
+    """The model will not call a function and instead generates a message."""
+    AUTO = "auto"
+    """The model can pick between generating a message or calling a function."""
+
+
+class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Available tool types for agents named tools."""
+
+    FUNCTION = "function"
+    """Tool type ``function``"""
+    CODE_INTERPRETER = "code_interpreter"
+    """Tool type ``code_interpreter``"""
+    FILE_SEARCH = "file_search"
+    """Tool type ``file_search``"""
+
+
+class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Each event in a server-sent events stream has an ``event`` and ``data`` property:
+
+    .. code-block::
+
+       event: thread.created
+       data: {"id": "thread_123", "object": "thread", ...}
+
+    We emit events whenever a new object is created, transitions to a new state, or is being
+    streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run
+    is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses
+    to create a message during a run, we emit a ``thread.message.created`` event, a
+    ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a
+    ``thread.message.completed`` event.
+
+    We may add additional events over time, so we recommend handling unknown events gracefully
+    in your code.
+    """
+
+    THREAD_CREATED = "thread.created"
+    """Event sent when a new thread is created. The data of this event is of type AgentThread"""
+    THREAD_RUN_CREATED = "thread.run.created"
+    """Event sent when a new run is created. The data of this event is of type ThreadRun"""
+    THREAD_RUN_QUEUED = "thread.run.queued"
+    """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun"""
+    THREAD_RUN_IN_PROGRESS = "thread.run.in_progress"
+    """Event sent when a run moves to ``in_progress`` status. The data of this event is of type ThreadRun"""
+    THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action"
+    """Event sent when a run moves to ``requires_action`` status. The data of this event is of type ThreadRun"""
+    THREAD_RUN_COMPLETED = "thread.run.completed"
+    """Event sent when a run is completed. The data of this event is of type ThreadRun"""
+    THREAD_RUN_FAILED = "thread.run.failed"
+    """Event sent when a run fails. The data of this event is of type ThreadRun"""
+    THREAD_RUN_CANCELLING = "thread.run.cancelling"
+    """Event sent when a run moves to ``cancelling`` status. The data of this event is of type ThreadRun"""
+    THREAD_RUN_CANCELLED = "thread.run.cancelled"
+    """Event sent when a run is cancelled. The data of this event is of type ThreadRun"""
+    THREAD_RUN_EXPIRED = "thread.run.expired"
+    """Event sent when a run is expired. The data of this event is of type ThreadRun"""
+    THREAD_RUN_STEP_CREATED = "thread.run.step.created"
+    """Event sent when a new thread run step is created. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress"
+    """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_DELTA = "thread.run.step.delta"
+    """Event sent when a run step is being streamed. The data of this event is of type RunStepDeltaChunk"""
+    THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed"
+    """Event sent when a run step is completed. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_FAILED = "thread.run.step.failed"
+    """Event sent when a run step fails. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled"
+    """Event sent when a run step is cancelled. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired"
+    """Event sent when a run step is expired. The data of this event is of type RunStep"""
+    THREAD_MESSAGE_CREATED = "thread.message.created"
+    """Event sent when a new message is created. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress"
+    """Event sent when a message moves to ``in_progress`` status. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_DELTA = "thread.message.delta"
+    """Event sent when a message is being streamed. The data of this event is of type MessageDeltaChunk"""
+    THREAD_MESSAGE_COMPLETED = "thread.message.completed"
+    """Event sent when a message is completed. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete"
+    """Event sent before a message is completed. The data of this event is of type ThreadMessage"""
+    ERROR = "error"
+    """Event sent when an error occurs, such as an internal server error or a timeout."""
+    DONE = "done"
+    """Event sent when the stream is done."""
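Illustrative aside, not part of the patch: a minimal sketch of consuming the events above. It assumes an SSE reader that yields (event, data) string pairs; that reader and the handler name are hypothetical, only the event names come from the enum.

import json
from azure.ai.client.models import AgentStreamEvent

def handle_agent_stream(sse_pairs):
    # sse_pairs: iterable of (event_name, data_json) tuples produced by an
    # SSE reader (assumed to exist; not provided by this package).
    for event_name, data_json in sse_pairs:
        if event_name == AgentStreamEvent.THREAD_MESSAGE_DELTA:
            payload = json.loads(data_json)  # MessageDeltaChunk-shaped JSON
            print("message delta for:", payload.get("id"))
        elif event_name == AgentStreamEvent.DONE:
            break  # terminal event: the stream is finished
        elif event_name == AgentStreamEvent.ERROR:
            raise RuntimeError(f"server-side stream error: {data_json}")
        else:
            # Skip anything unrecognized; the docstring above recommends
            # handling unknown events gracefully.
            pass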
+
+
+class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Possible API response formats."""
+
+    TEXT = "text"
+    """``text`` format should be used for requests involving any sort of ToolCall."""
+    JSON_OBJECT = "json_object"
+    """Using ``json_object`` format will limit the usage of ToolCall to only functions."""
+
+
 class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """to do."""
 
@@ -21,6 +135,13 @@ class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """Shared Access Signature (SAS) authentication"""
 
 
+class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Terminal event indicating the successful end of a stream."""
+
+    DONE = "done"
+    """Event sent when the stream is done."""
+
+
 class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """The Type (or category) of the connection."""
 
@@ -30,3 +151,331 @@ class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """Serverless API"""
     AGENT = "Agent"
     """Agent"""
+
+
+class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Terminal event indicating a server side error while streaming."""
+
+    ERROR = "error"
+    """Event sent when an error occurs, such as an internal server error or a timeout."""
+
+
+class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The possible values denoting the intended usage of a file."""
+
+    FINE_TUNE = "fine-tune"
+    """Indicates a file is used for fine tuning input."""
+    FINE_TUNE_RESULTS = "fine-tune-results"
+    """Indicates a file is used for fine tuning results."""
+    AGENTS = "assistants"
+    """Indicates a file is used as input to agents."""
+    AGENTS_OUTPUT = "assistants_output"
+    """Indicates a file is used as output by agents."""
+    BATCH = "batch"
+    """Indicates a file is used as input to a batch operation."""
+    BATCH_OUTPUT = "batch_output"
+    """Indicates a file is used as output by a vector store batch operation."""
+    VISION = "vision"
+    """Indicates a file is used as input to a vision operation."""
+
+
+class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The state of the file."""
+
+    UPLOADED = "uploaded"
+    """The file has been uploaded but it's not yet processed. This state is not returned by Azure
+    OpenAI and is exposed only for compatibility. It can be categorized as an inactive state."""
+    PENDING = "pending"
+    """The operation was created but is not yet queued to be processed. It can be categorized as an
+    inactive state."""
+    RUNNING = "running"
+    """The operation has started to be processed. It can be categorized as an active state."""
+    PROCESSED = "processed"
+    """The operation has been successfully processed and is ready for consumption. It can be
+    categorized as a terminal state."""
+    ERROR = "error"
+    """The operation has completed processing with a failure and cannot be further consumed. It can
+    be categorized as a terminal state."""
+    DELETING = "deleting"
+    """The entity is in the process of being deleted. This state is not returned by Azure OpenAI and
+    is exposed only for compatibility. It can be categorized as an active state."""
+    DELETED = "deleted"
+    """The entity has been deleted but may still be referenced by other entities predating the
+    deletion. It can be categorized as a terminal state."""
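Illustrative aside, not part of the patch: the inactive/active/terminal groupings spelled out in the FileState docstrings can be encoded directly in caller code. The set and function names below are mine, not the SDK's.

from azure.ai.client.models import FileState

# Groupings taken from the FileState docstrings above.
INACTIVE_FILE_STATES = {FileState.UPLOADED, FileState.PENDING}
ACTIVE_FILE_STATES = {FileState.RUNNING, FileState.DELETING}
TERMINAL_FILE_STATES = {FileState.PROCESSED, FileState.ERROR, FileState.DELETED}

def file_is_settled(state: FileState) -> bool:
    # True once the file will make no further progress on its own.
    return state in TERMINAL_FILE_STATES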
+
+
+class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The reason why the run is incomplete. This will point to which specific token limit was
+    reached over the course of the run.
+    """
+
+    MAX_COMPLETION_TOKENS = "max_completion_tokens"
+    """Maximum completion tokens exceeded"""
+    MAX_PROMPT_TOKENS = "max_prompt_tokens"
+    """Maximum prompt tokens exceeded"""
+
+
+class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The available sorting options when requesting a list of response objects."""
+
+    ASCENDING = "asc"
+    """Specifies an ascending sort order."""
+    DESCENDING = "desc"
+    """Specifies a descending sort order."""
+
+
+class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """A set of reasons describing why a message is marked as incomplete."""
+
+    CONTENT_FILTER = "content_filter"
+    """The run generating the message was terminated due to content filter flagging."""
+    MAX_TOKENS = "max_tokens"
+    """The run generating the message exhausted available tokens before completion."""
+    RUN_CANCELLED = "run_cancelled"
+    """The run generating the message was cancelled before completion."""
+    RUN_FAILED = "run_failed"
+    """The run generating the message failed."""
+    RUN_EXPIRED = "run_expired"
+    """The run generating the message expired."""
+
+
+class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The possible values for roles attributed to messages in a thread."""
+
+    USER = "user"
+    """The role representing the end-user."""
+    AGENT = "assistant"
+    """The role representing the agent."""
+
+
+class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The possible execution status values for a thread message."""
+
+    IN_PROGRESS = "in_progress"
+    """A run is currently creating this message."""
+    INCOMPLETE = "incomplete"
+    """This message is incomplete. See incomplete_details for more information."""
+    COMPLETED = "completed"
+    """This message was successfully completed by a run."""
+
+
+class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Message operation related streaming events."""
+
+    THREAD_MESSAGE_CREATED = "thread.message.created"
+    """Event sent when a new message is created. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress"
+    """Event sent when a message moves to ``in_progress`` status. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_DELTA = "thread.message.delta"
+    """Event sent when a message is being streamed. The data of this event is of type MessageDeltaChunk"""
+    THREAD_MESSAGE_COMPLETED = "thread.message.completed"
+    """Event sent when a message is completed. The data of this event is of type ThreadMessage"""
+    THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete"
+    """Event sent before a message is completed. The data of this event is of type ThreadMessage"""
+
+
+class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Possible values for the status of an agent thread run."""
+
+    QUEUED = "queued"
+    """Represents a run that is queued to start."""
+    IN_PROGRESS = "in_progress"
+    """Represents a run that is in progress."""
+    REQUIRES_ACTION = "requires_action"
+    """Represents a run that needs another operation, such as tool output submission, to continue."""
+    CANCELLING = "cancelling"
+    """Represents a run that is in the process of cancellation."""
+    CANCELLED = "cancelled"
+    """Represents a run that has been cancelled."""
+    FAILED = "failed"
+    """Represents a run that failed."""
+    COMPLETED = "completed"
+    """Represents a run that successfully completed."""
+    EXPIRED = "expired"
+    """Represents a run that expired before it could otherwise finish."""
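Illustrative aside, not part of the patch: ``requires_action`` is the only non-terminal RunStatus that needs caller intervention, so polling loops typically branch on it. The helpers below (``get_run``, ``submit_outputs``) are hypothetical placeholders, not operations defined in this patch.

import time
from azure.ai.client.models import RunStatus

# Terminal statuses per the RunStatus docstrings above.
TERMINAL_RUN_STATUSES = {RunStatus.COMPLETED, RunStatus.FAILED, RunStatus.CANCELLED, RunStatus.EXPIRED}

def wait_for_run(get_run, submit_outputs, interval: float = 1.0):
    # get_run() -> object with a .status attribute; submit_outputs(run) handles
    # tool-output submission. Both are assumed helpers, not SDK methods.
    run = get_run()
    while run.status not in TERMINAL_RUN_STATUSES:
        if run.status == RunStatus.REQUIRES_ACTION:
            submit_outputs(run)  # e.g. respond to a SubmitToolOutputsAction
        time.sleep(interval)
        run = get_run()
    return run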
+
+
+class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Possible error code values attributable to a failed run step."""
+
+    SERVER_ERROR = "server_error"
+    """Represents a server error."""
+    RATE_LIMIT_EXCEEDED = "rate_limit_exceeded"
+    """Represents an error indicating configured rate limits were exceeded."""
+
+
+class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Possible values for the status of a run step."""
+
+    IN_PROGRESS = "in_progress"
+    """Represents a run step still in progress."""
+    CANCELLED = "cancelled"
+    """Represents a run step that was cancelled."""
+    FAILED = "failed"
+    """Represents a run step that failed."""
+    COMPLETED = "completed"
+    """Represents a run step that successfully completed."""
+    EXPIRED = "expired"
+    """Represents a run step that expired before otherwise finishing."""
+
+
+class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Run step operation related streaming events."""
+
+    THREAD_RUN_STEP_CREATED = "thread.run.step.created"
+    """Event sent when a new thread run step is created. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress"
+    """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type RunStep"""
+    THREAD_RUN_STEP_DELTA = "thread.run.step.delta"
+    """Event sent when a run step is being streamed. The data of this event is of type RunStepDeltaChunk"""
+    THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed"
+    """Event sent when a run step is completed. 
The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + + +class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible types of run steps.""" + + MESSAGE_CREATION = "message_creation" + """Represents a run step to create a message.""" + TOOL_CALLS = "tool_calls" + """Represents a run step that calls tools.""" + + +class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run operation related streaming events.""" + + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + + +class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Thread operation related streaming events.""" + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + + +class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible truncation strategies for the thread.""" + + AUTO = "auto" + """Default value. 
Messages in the middle of the thread will be dropped to fit the context length
+    of the model."""
+    LAST_MESSAGES = "last_messages"
+    """The thread will truncate to the ``lastMessages`` count of recent messages."""
+
+
+class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Type of chunking strategy."""
+
+    AUTO = "auto"
+    STATIC = "static"
+
+
+class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Type of chunking strategy."""
+
+    OTHER = "other"
+    STATIC = "static"
+
+
+class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Describes the relationship between the days and the expiration of this vector store."""
+
+    LAST_ACTIVE_AT = "last_active_at"
+    """The expiration policy is based on the last time the vector store was active."""
+
+
+class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """The status of the vector store file batch."""
+
+    IN_PROGRESS = "in_progress"
+    """The vector store is still processing this file batch."""
+    COMPLETED = "completed"
+    """The vector store file batch is ready for use."""
+    CANCELLED = "cancelled"
+    """The vector store file batch was cancelled."""
+    FAILED = "failed"
+    """The vector store file batch failed to process."""
+
+
+class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Error code variants for vector store file processing."""
+
+    INTERNAL_ERROR = "internal_error"
+    """An internal error occurred."""
+    FILE_NOT_FOUND = "file_not_found"
+    """The file was not found."""
+    PARSING_ERROR = "parsing_error"
+    """The file could not be parsed."""
+    UNHANDLED_MIME_TYPE = "unhandled_mime_type"
+    """The file has an unhandled mime type."""
+
+
+class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Vector store file status."""
+
+    IN_PROGRESS = "in_progress"
+    """The file is currently being processed."""
+    COMPLETED = "completed"
+    """The file has been successfully processed."""
+    FAILED = "failed"
+    """The file has failed to process."""
+    CANCELLED = "cancelled"
+    """The file was cancelled."""
+
+
+class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Query parameter filter for vector store file retrieval endpoint."""
+
+    IN_PROGRESS = "in_progress"
+    """Retrieve only files that are currently being processed"""
+    COMPLETED = "completed"
+    """Retrieve only files that have been successfully processed"""
+    FAILED = "failed"
+    """Retrieve only files that have failed to process"""
+    CANCELLED = "cancelled"
+    """Retrieve only files that were cancelled"""
+
+
+class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Vector store possible status."""
+
+    EXPIRED = "expired"
+    """``expired`` status indicates that this vector store has expired and is no longer available for use."""
+    IN_PROGRESS = "in_progress"
+    """``in_progress`` status indicates that this vector store is still processing files."""
+    COMPLETED = "completed"
+    """``completed`` status indicates that this vector store is ready for use."""
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py
index 822eb40c3233..e57be1a42b29 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py
@@ -12,11 +12,359 @@
 from .. 
import _model_base from .._model_base import rest_discriminator, rest_field -from ._enums import AuthenticationType +from ._enums import ( + AuthenticationType, + RunStepType, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, +) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from .. import models as _models + from .. import _types, models as _models + + +class Agent(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Represents an agent that can call the model and use tools. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always assistant. Required. Default value is + "assistant". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the agent. Required. + :vartype name: str + :ivar description: The description of the agent. Required. + :vartype description: str + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The system instructions for the agent to use. Required. + :vartype instructions: str + :ivar tools: The collection of tools enabled for the agent. Required. + :vartype tools: list[~azure.ai.client.models.ToolDefinition] + :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are + specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required. + :vartype tool_resources: ~azure.ai.client.models.ToolResources + :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Required. + :vartype temperature: float + :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required. + :vartype top_p: float + :ivar response_format: The response format of the tool calls used by this agent. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat + :vartype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode or + ~azure.ai.client.models.AgentsApiResponseFormat + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["assistant"] = rest_field() + """The object type, which is always assistant. Required. Default value is \"assistant\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. 
Required.""" + name: str = rest_field() + """The name of the agent. Required.""" + description: str = rest_field() + """The description of the agent. Required.""" + model: str = rest_field() + """The ID of the model to use. Required.""" + instructions: str = rest_field() + """The system instructions for the agent to use. Required.""" + tools: List["_models.ToolDefinition"] = rest_field() + """The collection of tools enabled for the agent. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are used by the agent's tools. The resources are specific to the type + of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required.""" + temperature: float = rest_field() + """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, + while lower values like 0.2 will make it more focused and deterministic. Required.""" + top_p: float = rest_field() + """An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required.""" + response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() + """The response format of the tool calls used by this agent. Is one of the following types: str, + Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + description: str, + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + tool_resources: "_models.ToolResources", + temperature: float, + top_p: float, + metadata: Dict[str, str], + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant"] = "assistant" + + +class AgentDeletionStatus(_model_base.Model): + """The status of an agent deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is + "assistant.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["assistant.deleted"] = rest_field() + """The object type, which is always 'assistant.deleted'. Required. 
Default value is
+    \"assistant.deleted\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        deleted: bool,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["assistant.deleted"] = "assistant.deleted"
+
+
+class AgentsApiResponseFormat(_model_base.Model):
+    """An object describing the expected output of the model. If ``json_object``, only ``function``
+    type ``tools`` are allowed to be passed to the Run.
+    If ``text``, the model can return text or any value needed.
+
+    :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and
+     "json_object".
+    :vartype type: str or ~azure.ai.client.models.ApiResponseFormat
+    """
+
+    type: Optional[Union[str, "_models.ApiResponseFormat"]] = rest_field()
+    """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: Optional[Union[str, "_models.ApiResponseFormat"]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class AgentsNamedToolChoice(_model_base.Model):
+    """Specifies a tool the model should use. Use to force the model to call a specific tool.
+
+
+    :ivar type: The type of tool. If type is ``function``\\ , the function name must be set.
+     Required. Known values are: "function", "code_interpreter", and "file_search".
+    :vartype type: str or ~azure.ai.client.models.AgentsNamedToolChoiceType
+    :ivar function: The name of the function to call.
+    :vartype function: ~azure.ai.client.models.FunctionName
+    """
+
+    type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field()
+    """The type of tool. If type is ``function``\ , the function name must be set. Required. Known
+    values are: \"function\", \"code_interpreter\", and \"file_search\"."""
+    function: Optional["_models.FunctionName"] = rest_field()
+    """The name of the function to call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: Union[str, "_models.AgentsNamedToolChoiceType"],
+        function: Optional["_models.FunctionName"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class AgentThread(_model_base.Model):
+    """Information about a single thread associated with an agent.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar object: The object type, which is always 'thread'. Required. Default value is "thread".
+    :vartype object: str
+    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
+     Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar tool_resources: A set of resources that are made available to the agent's tools in this
+     thread. 
The resources are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required. + :vartype tool_resources: ~azure.ai.client.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread"] = rest_field() + """The object type, which is always 'thread'. Required. Default value is \"thread\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + tool_resources: "_models.ToolResources", + metadata: Dict[str, str], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread"] = "thread" + + +class AgentThreadCreationOptions(_model_base.Model): + """The details used to create a new agent thread. + + :ivar messages: The initial messages to associate with the new thread. + :vartype messages: list[~azure.ai.client.models.ThreadMessageOptions] + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. + :vartype tool_resources: ~azure.ai.client.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() + """The initial messages to associate with the new thread.""" + tool_resources: Optional["_models.ToolResources"] = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the + type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + messages: Optional[List["_models.ThreadMessageOptions"]] = None, + tool_resources: Optional["_models.ToolResources"] = None, + metadata: Optional[Dict[str, str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) class InputData(_model_base.Model): @@ -98,6 +446,100 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="app_insights", **kwargs) +class ToolDefinition(_model_base.Model): + """An abstract representation of an input tool definition that an agent can use. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CodeInterpreterToolDefinition, FileSearchToolDefinition, FunctionToolDefinition + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): + """The input definition information for a code interpreter tool as used to configure an agent. + + + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + + @overload + def __init__( + self, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="code_interpreter", **kwargs) + + +class CodeInterpreterToolResource(_model_base.Model): + """A set of resources that are used by the ``code_interpreter`` tool. + + :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can + be a maximum of 20 files + associated with the tool. + :vartype file_ids: list[str] + """ + + file_ids: Optional[List[str]] = rest_field() + """A list of file IDs made available to the ``code_interpreter`` tool. 
There can be a maximum of + 20 files + associated with the tool.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ConnectionProperties(_model_base.Model): """to do. @@ -393,60 +835,409 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) -class SystemData(_model_base.Model): - """Metadata pertaining to creation and last modification of the resource. +class FileContentResponse(_model_base.Model): + """A response from a file get content operation. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar created_at: The timestamp the resource was created at. - :vartype created_at: ~datetime.datetime - :ivar created_by: The identity that created the resource. - :vartype created_by: str - :ivar created_by_type: The identity type that created the resource. - :vartype created_by_type: str - :ivar last_modified_at: The timestamp of resource last modification (UTC). - :vartype last_modified_at: ~datetime.datetime + :ivar content: The content of the file, in bytes. Required. + :vartype content: bytes """ - created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") - """The timestamp the resource was created at.""" - created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) - """The identity that created the resource.""" - created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) - """The identity type that created the resource.""" - last_modified_at: Optional[datetime.datetime] = rest_field( - name="lastModifiedAt", visibility=["read"], format="rfc3339" - ) - """The timestamp of resource last modification (UTC).""" - + content: bytes = rest_field(format="base64") + """The content of the file, in bytes. Required.""" -class UpdateEvaluationRequest(_model_base.Model): - """Update Evaluation Request. + @overload + def __init__( + self, + *, + content: bytes, + ): ... - All required parameters must be populated in order to send to server. + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ - :ivar tags: Tags to be updated. Required. - :vartype tags: dict[str, str] - :ivar display_name: Display Name. Required. - :vartype display_name: str - :ivar description: Description. Required. + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FileDeletionStatus(_model_base.Model): + """A status response from a file deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. 
Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["file"] = rest_field() + """The object type, which is always 'file'. Required. Default value is \"file\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class FileListResponse(_model_base.Model): + """The response data from a file list operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always 'list'. Required. Default value is "list". + :vartype object: str + :ivar data: The files returned for the request. Required. + :vartype data: list[~azure.ai.client.models.OpenAIFile] + """ + + object: Literal["list"] = rest_field() + """The object type, which is always 'list'. Required. Default value is \"list\".""" + data: List["_models.OpenAIFile"] = rest_field() + """The files returned for the request. Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.OpenAIFile"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): + """The input definition information for a file search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Options overrides for the file search tool. + :vartype file_search: ~azure.ai.client.models.FileSearchToolDefinitionDetails + """ + + type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() + """Options overrides for the file search tool.""" + + @overload + def __init__( + self, + *, + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="file_search", **kwargs) + + +class FileSearchToolDefinitionDetails(_model_base.Model): + """Options overrides for the file search tool. + + :ivar max_num_results: The maximum number of results the file search tool should output. The + default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 + inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information. 
+    :vartype max_num_results: int
+    """
+
+    max_num_results: Optional[int] = rest_field()
+    """The maximum number of results the file search tool should output. The default is 20 for gpt-4*
+    models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
+
+    Note that the file search tool may output fewer than ``max_num_results`` results. See the file
+    search tool documentation for more information."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        max_num_results: Optional[int] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class FileSearchToolResource(_model_base.Model):
+    """A set of resources that are used by the ``file_search`` tool.
+
+    :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a
+     maximum of 1 vector
+     store attached to the agent.
+    :vartype vector_store_ids: list[str]
+    """
+
+    vector_store_ids: Optional[List[str]] = rest_field()
+    """The ID of the vector store attached to this agent. There can be a maximum of 1 vector
+    store attached to the agent."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        vector_store_ids: Optional[List[str]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class FunctionDefinition(_model_base.Model):
+    """The input definition information for a function.
+
+
+    :ivar name: The name of the function to be called. Required.
+    :vartype name: str
+    :ivar description: A description of what the function does, used by the model to choose when
+     and how to call the function.
     :vartype description: str
+    :ivar parameters: The parameters the function accepts, described as a JSON Schema object.
+     Required.
+    :vartype parameters: any
     """
 
-    tags: Dict[str, str] = rest_field()
-    """Tags to be updated. Required."""
-    display_name: str = rest_field(name="displayName")
-    """Display Name. Required."""
-    description: str = rest_field()
-    """Description. Required."""
+    name: str = rest_field()
+    """The name of the function to be called. Required."""
+    description: Optional[str] = rest_field()
+    """A description of what the function does, used by the model to choose when and how to call the
+    function."""
+    parameters: Any = rest_field()
+    """The parameters the function accepts, described as a JSON Schema object. Required."""
 
     @overload
     def __init__(
         self,
         *,
-        tags: Dict[str, str],
-        display_name: str,
-        description: str,
+        name: str,
+        parameters: Any,
+        description: Optional[str] = None,
     ): ...
 
     @overload
     def __init__(self, mapping: Mapping[str, Any]):
         """
         :param mapping: raw JSON to initialize the model.
         :type mapping: Mapping[str, Any]
         """
 
     def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
         super().__init__(*args, **kwargs)
+
+
+class FunctionName(_model_base.Model):
+    """The function name that will be used, if using the ``function`` tool.
+
+
+    :ivar name: The name of the function to call. Required.
+    :vartype name: str
+    """
+
+    name: str = rest_field()
+    """The name of the function to call. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class FunctionToolDefinition(ToolDefinition, discriminator="function"):
+    """The input definition information for a function tool as used to configure an agent.
+
+
+    :ivar type: The object type, which is always 'function'. Required. Default value is "function".
+    :vartype type: str
+    :ivar function: The definition of the concrete function that the function tool should call.
+     Required.
+    :vartype function: ~azure.ai.client.models.FunctionDefinition
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'function'. Required. Default value is \"function\"."""
+    function: "_models.FunctionDefinition" = rest_field()
+    """The definition of the concrete function that the function tool should call. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        function: "_models.FunctionDefinition",
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="function", **kwargs)
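Illustrative aside, not part of the patch: because ``parameters`` is a JSON Schema object, wiring up a function tool with these models looks roughly like this. The weather function and its schema are invented for illustration.

from azure.ai.client.models import FunctionDefinition, FunctionToolDefinition

# A hypothetical function exposed to the agent; the JSON Schema below
# describes its single required argument.
get_weather_tool = FunctionToolDefinition(
    function=FunctionDefinition(
        name="get_weather",
        description="Look up the current weather for a city.",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    )
)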
Required.""" + + @overload + def __init__( + self, + *, + name: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class FunctionToolDefinition(ToolDefinition, discriminator="function"): + """The input definition information for a function tool as used to configure an agent. + + + :ivar type: The object type, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The definition of the concrete function that the function tool should call. + Required. + :vartype function: ~azure.ai.client.models.FunctionDefinition + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.FunctionDefinition" = rest_field() + """The definition of the concrete function that the function tool should call. Required.""" + + @overload + def __init__( + self, + *, + function: "_models.FunctionDefinition", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="function", **kwargs) + + +class MessageAttachment(_model_base.Model): + """This describes to which tools a file has been attached. + + + :ivar file_id: The ID of the file to attach to the message. Required. + :vartype file_id: str + :ivar tools: The tools to add to this file. Required. + :vartype tools: list[~azure.ai.client.models.CodeInterpreterToolDefinition or + ~azure.ai.client.models.FileSearchToolDefinition] + """ + + file_id: str = rest_field() + """The ID of the file to attach to the message. Required.""" + tools: List["_types.MessageAttachmentToolDefinition"] = rest_field() + """The tools to add to this file. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + tools: List["_types.MessageAttachmentToolDefinition"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageContent(_model_base.Model): + """An abstract representation of a single item of thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageImageFileContent, MessageTextContent + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageDelta(_model_base.Model): + """Represents the typed 'delta' payload within a streaming message delta chunk. + + + :ivar role: The entity that produced the message. Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.client.models.MessageRole + :ivar content: The content of the message as an array of text and/or images. Required. + :vartype content: list[~azure.ai.client.models.MessageDeltaContent] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" + content: List["_models.MessageDeltaContent"] = rest_field() + """The content of the message as an array of text and/or images. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageDeltaContent"], ): ... @overload @@ -458,3 +1249,4158 @@ def __init__(self, mapping: Mapping[str, Any]): def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation super().__init__(*args, **kwargs) + + +class MessageDeltaChunk(_model_base.Model): + """Represents a message delta i.e. any changed fields on a message during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.message.delta``. Required. Default + value is "thread.message.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the Message. Required. + :vartype delta: ~azure.ai.client.models.MessageDelta + """ + + id: str = rest_field() + """The identifier of the message, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message.delta"] = rest_field() + """The object type, which is always ``thread.message.delta``. Required. Default value is + \"thread.message.delta\".""" + delta: "_models.MessageDelta" = rest_field() + """The delta containing the fields that have changed on the Message. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.MessageDelta", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message.delta"] = "thread.message.delta" + + +class MessageDeltaContent(_model_base.Model): + """The abstract base representation of a partial streamed message content payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaImageFileContent, MessageDeltaTextContent + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field() + """The index of the content part of the message. 
Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of content for this content part. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"):
+    """Represents a streamed image file content part within a streaming message delta chunk.
+
+
+    :ivar index: The index of the content part of the message. Required.
+    :vartype index: int
+    :ivar type: The type of content for this content part, which is always "image_file". Required.
+     Default value is "image_file".
+    :vartype type: str
+    :ivar image_file: The image_file data.
+    :vartype image_file: ~azure.ai.client.models.MessageDeltaImageFileContentObject
+    """
+
+    type: Literal["image_file"] = rest_discriminator(name="type")  # type: ignore
+    """The type of content for this content part, which is always \"image_file\". Required. Default
+     value is \"image_file\"."""
+    image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field()
+    """The image_file data."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="image_file", **kwargs)
+
+
+class MessageDeltaImageFileContentObject(_model_base.Model):
+    """Represents the 'image_file' payload within streaming image file content.
+
+    :ivar file_id: The file ID of the image in the message content.
+    :vartype file_id: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The file ID of the image in the message content."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextAnnotation(_model_base.Model):
+    """The abstract base representation of a streamed text content part's text annotation.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the annotation within a text content part. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the text content annotation. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        type: str,
+    ): ...
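+
+    # Illustrative sketch only (not part of the generated surface): when consuming
+    # a streaming run, the text of a message is typically reassembled by
+    # concatenating the deltas carried in each MessageDeltaChunk. `chunks` below is
+    # a hypothetical iterable of already-parsed MessageDeltaChunk objects:
+    #
+    #     buffer = ""
+    #     for chunk in chunks:
+    #         for part in chunk.delta.content or []:
+    #             if isinstance(part, MessageDeltaTextContent) and part.text:
+    #                 buffer += part.text.value or ""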
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"):
+    """Represents a streamed text content part within a streaming message delta chunk.
+
+
+    :ivar index: The index of the content part of the message. Required.
+    :vartype index: int
+    :ivar type: The type of content for this content part, which is always "text". Required.
+     Default value is "text".
+    :vartype type: str
+    :ivar text: The text content details.
+    :vartype text: ~azure.ai.client.models.MessageDeltaTextContentObject
+    """
+
+    type: Literal["text"] = rest_discriminator(name="type")  # type: ignore
+    """The type of content for this content part, which is always \"text\". Required. Default value
+     is \"text\"."""
+    text: Optional["_models.MessageDeltaTextContentObject"] = rest_field()
+    """The text content details."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        text: Optional["_models.MessageDeltaTextContentObject"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="text", **kwargs)
+
+
+class MessageDeltaTextContentObject(_model_base.Model):
+    """Represents the data of a streamed text content part within a streaming message delta chunk.
+
+    :ivar value: The data that makes up the text.
+    :vartype value: str
+    :ivar annotations: Annotations for the text.
+    :vartype annotations: list[~azure.ai.client.models.MessageDeltaTextAnnotation]
+    """
+
+    value: Optional[str] = rest_field()
+    """The data that makes up the text."""
+    annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field()
+    """Annotations for the text."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        value: Optional[str] = None,
+        annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"):
+    """Represents a streamed file citation applied to a streaming text content part.
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation, which is always "file_citation".
+     Required. Default value is "file_citation".
+    :vartype type: str
+    :ivar file_citation: The file citation information.
+    :vartype file_citation: ~azure.ai.client.models.MessageDeltaTextFileCitationAnnotationObject
+    :ivar text: The text in the message content that needs to be replaced.
+    :vartype text: str
+    :ivar start_index: The start index of this annotation in the content text.
+    :vartype start_index: int
+    :ivar end_index: The end index of this annotation in the content text.
+    :vartype end_index: int
+    """
+
+    type: Literal["file_citation"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the text content annotation, which is always \"file_citation\". Required. Default
+     value is \"file_citation\"."""
+    file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field()
+    """The file citation information."""
+    text: Optional[str] = rest_field()
+    """The text in the message content that needs to be replaced."""
+    start_index: Optional[int] = rest_field()
+    """The start index of this annotation in the content text."""
+    end_index: Optional[int] = rest_field()
+    """The end index of this annotation in the content text."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None,
+        text: Optional[str] = None,
+        start_index: Optional[int] = None,
+        end_index: Optional[int] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="file_citation", **kwargs)
+
+
+class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the data of a streamed file citation as applied to a streaming text content part.
+
+    :ivar file_id: The ID of the specific file the citation is from.
+    :vartype file_id: str
+    :ivar quote: The specific quote in the cited file.
+    :vartype quote: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The ID of the specific file the citation is from."""
+    quote: Optional[str] = rest_field()
+    """The specific quote in the cited file."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+        quote: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"):
+    """Represents a streamed file path annotation applied to a streaming text content part.
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation, which is always "file_path". Required.
+     Default value is "file_path".
+    :vartype type: str
+    :ivar file_path: The file path information.
+    :vartype file_path: ~azure.ai.client.models.MessageDeltaTextFilePathAnnotationObject
+    :ivar start_index: The start index of this annotation in the content text.
+    :vartype start_index: int
+    :ivar end_index: The end index of this annotation in the content text.
+    :vartype end_index: int
+    :ivar text: The text in the message content that needs to be replaced.
+    :vartype text: str
+    """
+
+    type: Literal["file_path"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the text content annotation, which is always \"file_path\". Required.
Default + value is \"file_path\".""" + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field() + """The file path information.""" + start_index: Optional[int] = rest_field() + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field() + """The end index of this annotation in the content text.""" + text: Optional[str] = rest_field() + """The text in the message content that needs to be replaced.""" + + @overload + def __init__( + self, + *, + index: int, + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + text: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="file_path", **kwargs) + + +class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): + """Represents the data of a streamed file path annotation as applied to a streaming text content + part. + + :ivar file_id: The file ID for the annotation. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field() + """The file ID for the annotation.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageImageFileContent(MessageContent, discriminator="image_file"): + """A representation of image file content in a thread message. + + + :ivar type: The object type, which is always 'image_file'. Required. Default value is + "image_file". + :vartype type: str + :ivar image_file: The image file for this thread message content item. Required. + :vartype image_file: ~azure.ai.client.models.MessageImageFileDetails + """ + + type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" + image_file: "_models.MessageImageFileDetails" = rest_field() + """The image file for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileDetails", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="image_file", **kwargs) + + +class MessageImageFileDetails(_model_base.Model): + """An image reference, as represented in thread message content. + + + :ivar file_id: The ID for the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID for the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageIncompleteDetails(_model_base.Model): + """Information providing additional detail about a message entering an incomplete status. + + + :ivar reason: The provided reason describing why the message was marked as incomplete. + Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and + "run_expired". + :vartype reason: str or ~azure.ai.client.models.MessageIncompleteDetailsReason + """ + + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() + """The provided reason describing why the message was marked as incomplete. Required. Known values + are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and + \"run_expired\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.MessageIncompleteDetailsReason"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageTextAnnotation(_model_base.Model): + """An abstract representation of an annotation to text thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + text: str = rest_field() + """The textual content associated with this text annotation item. Required.""" + + @overload + def __init__( + self, + *, + type: str, + text: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageTextContent(MessageContent, discriminator="text"): + """A representation of a textual item of thread message content. + + + :ivar type: The object type, which is always 'text'. Required. Default value is "text". + :vartype type: str + :ivar text: The text and associated annotations for this thread message content item. Required. + :vartype text: ~azure.ai.client.models.MessageTextDetails + """ + + type: Literal["text"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'text'. Required. Default value is \"text\".""" + text: "_models.MessageTextDetails" = rest_field() + """The text and associated annotations for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + text: "_models.MessageTextDetails", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="text", **kwargs) + + +class MessageTextDetails(_model_base.Model): + """The text and associated annotations for a single item of agent thread message content. + + + :ivar value: The text data. Required. + :vartype value: str + :ivar annotations: A list of annotations associated with this text. Required. + :vartype annotations: list[~azure.ai.client.models.MessageTextAnnotation] + """ + + value: str = rest_field() + """The text data. Required.""" + annotations: List["_models.MessageTextAnnotation"] = rest_field() + """A list of annotations associated with this text. Required.""" + + @overload + def __init__( + self, + *, + value: str, + annotations: List["_models.MessageTextAnnotation"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): + """A citation within the message that points to a specific quote from a specific File associated + with the agent or the message. Generated when the agent uses the 'file_search' tool to search + files. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_citation'. Required. Default value is + "file_citation". + :vartype type: str + :ivar file_citation: A citation within the message that points to a specific quote from a + specific file. + Generated when the agent uses the "file_search" tool to search files. Required. + :vartype file_citation: ~azure.ai.client.models.MessageTextFileCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" + file_citation: "_models.MessageTextFileCitationDetails" = rest_field() + """A citation within the message that points to a specific quote from a specific file. + Generated when the agent uses the \"file_search\" tool to search files. Required.""" + start_index: Optional[int] = rest_field() + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field() + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_citation: "_models.MessageTextFileCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="file_citation", **kwargs)
+
+
+class MessageTextFileCitationDetails(_model_base.Model):
+    """A representation of a file-based text citation, as used in a file-based annotation of text
+    thread message content.
+
+
+    :ivar file_id: The ID of the file associated with this citation. Required.
+    :vartype file_id: str
+    :ivar quote: The specific quote cited in the associated file. Required.
+    :vartype quote: str
+    """
+
+    file_id: str = rest_field()
+    """The ID of the file associated with this citation. Required."""
+    quote: str = rest_field()
+    """The specific quote cited in the associated file. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: str,
+        quote: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"):
+    """A citation within the message that points to a file located at a specific path.
+
+
+    :ivar text: The textual content associated with this text annotation item. Required.
+    :vartype text: str
+    :ivar type: The object type, which is always 'file_path'. Required. Default value is
+     "file_path".
+    :vartype type: str
+    :ivar file_path: A URL for the file that's generated when the agent uses the code_interpreter
+     tool to generate a file. Required.
+    :vartype file_path: ~azure.ai.client.models.MessageTextFilePathDetails
+    :ivar start_index: The first text index associated with this text annotation.
+    :vartype start_index: int
+    :ivar end_index: The last text index associated with this text annotation.
+    :vartype end_index: int
+    """
+
+    type: Literal["file_path"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'file_path'. Required. Default value is \"file_path\"."""
+    file_path: "_models.MessageTextFilePathDetails" = rest_field()
+    """A URL for the file that's generated when the agent uses the code_interpreter tool to generate a
+     file. Required."""
+    start_index: Optional[int] = rest_field()
+    """The first text index associated with this text annotation."""
+    end_index: Optional[int] = rest_field()
+    """The last text index associated with this text annotation."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        text: str,
+        file_path: "_models.MessageTextFilePathDetails",
+        start_index: Optional[int] = None,
+        end_index: Optional[int] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="file_path", **kwargs)
+
+
+class MessageTextFilePathDetails(_model_base.Model):
+    """An encapsulation of a file ID, as used by a file path annotation of text thread message
+    content.
+
+
+    :ivar file_id: The ID of the specific file that the citation is from. Required.
+    :vartype file_id: str
+    """
+
+    file_id: str = rest_field()
+    """The ID of the specific file that the citation is from. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: str,
+    ): ...
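+
+    # Illustrative sketch only (not part of the generated surface): callers often
+    # walk the annotations of a completed message to resolve cited files.
+    # `message_text` is a hypothetical MessageTextDetails instance:
+    #
+    #     for annotation in message_text.annotations:
+    #         if isinstance(annotation, MessageTextFileCitationAnnotation):
+    #             print(annotation.file_citation.file_id, annotation.file_citation.quote)
+    #         elif isinstance(annotation, MessageTextFilePathAnnotation):
+    #             print(annotation.file_path.file_id)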
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class OpenAIFile(_model_base.Model):
+    """Represents a file uploaded to the service.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar object: The object type, which is always 'file'. Required. Default value is "file".
+    :vartype object: str
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar bytes: The size of the file, in bytes. Required.
+    :vartype bytes: int
+    :ivar filename: The name of the file. Required.
+    :vartype filename: str
+    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
+     Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune",
+     "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision".
+    :vartype purpose: str or ~azure.ai.client.models.FilePurpose
+    :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values
+     are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted".
+    :vartype status: str or ~azure.ai.client.models.FileState
+    :ivar status_details: The error message with details in case processing of this file failed.
+     This field is available in Azure OpenAI only.
+    :vartype status_details: str
+    """
+
+    object: Literal["file"] = rest_field()
+    """The object type, which is always 'file'. Required. Default value is \"file\"."""
+    id: str = rest_field()
+    """The identifier, which can be referenced in API endpoints. Required."""
+    bytes: int = rest_field()
+    """The size of the file, in bytes. Required."""
+    filename: str = rest_field()
+    """The name of the file. Required."""
+    created_at: datetime.datetime = rest_field(format="unix-timestamp")
+    """The Unix timestamp, in seconds, representing when this object was created. Required."""
+    purpose: Union[str, "_models.FilePurpose"] = rest_field()
+    """The intended purpose of a file. Required. Known values are: \"fine-tune\",
+     \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and
+     \"vision\"."""
+    status: Optional[Union[str, "_models.FileState"]] = rest_field()
+    """The state of the file. This field is available in Azure OpenAI only. Known values are:
+     \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and
+     \"deleted\"."""
+    status_details: Optional[str] = rest_field()
+    """The error message with details in case processing of this file failed. This field is available
+     in Azure OpenAI only."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        bytes: int,
+        filename: str,
+        created_at: datetime.datetime,
+        purpose: Union[str, "_models.FilePurpose"],
+        status: Optional[Union[str, "_models.FileState"]] = None,
+        status_details: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class OpenAIPageableListOfAgent(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.Agent] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.Agent"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.Agent"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfRunStep(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.RunStep] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.RunStep"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.RunStep"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadMessage(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.ThreadMessage] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadMessage"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadMessage"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadRun(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.ThreadRun] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadRun"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadRun"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStore(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.VectorStore] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStore"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStore"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStoreFile(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.client.models.VectorStoreFile] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStoreFile"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStoreFile"], + first_id: str, + last_id: str, + has_more: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["list"] = "list"
+
+
+class RequiredAction(_model_base.Model):
+    """An abstract representation of a required action for an agent thread run to continue.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    SubmitToolOutputsAction
+
+
+    :ivar type: The object type. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RequiredToolCall(_model_base.Model):
+    """An abstract representation of a tool invocation needed by the model to continue a run.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RequiredFunctionToolCall
+
+
+    :ivar type: The object type for the required tool call. Required. Default value is None.
+    :vartype type: str
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the required tool call. Required. Default value is None."""
+    id: str = rest_field()
+    """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+        id: str,  # pylint: disable=redefined-builtin
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
+    """A representation of a requested call to a function tool, needed by the model to continue
+    evaluation of a run.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type of the required tool call. Always 'function' for function tools.
+     Required. Default value is "function".
+    :vartype type: str
+    :ivar function: Detailed information about the function to be executed by the tool that
+     includes name and arguments. Required.
+    :vartype function: ~azure.ai.client.models.RequiredFunctionToolCallDetails
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type of the required tool call. Always 'function' for function tools. Required.
+     Default value is \"function\"."""
+    function: "_models.RequiredFunctionToolCallDetails" = rest_field()
+    """Detailed information about the function to be executed by the tool that includes name and
+     arguments. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.RequiredFunctionToolCallDetails",
+    ): ...
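+
+    # Illustrative sketch only (not part of the generated surface): a caller
+    # handling a run in the `requires_action` state would typically decode the
+    # JSON-encoded arguments and dispatch to a local function. `tool_call` and
+    # `my_functions` are hypothetical names:
+    #
+    #     import json
+    #     if isinstance(tool_call, RequiredFunctionToolCall):
+    #         args = json.loads(tool_call.function.arguments)
+    #         output = my_functions[tool_call.function.name](**args)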
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="function", **kwargs) + + +class RequiredFunctionToolCallDetails(_model_base.Model): + """The detailed information for a function invocation, as provided by a required action invoking a + function tool, that includes the name of and arguments to the function. + + + :ivar name: The name of the function. Required. + :vartype name: str + :ivar arguments: The arguments to use when invoking the named function, as provided by the + model. Arguments are presented as a JSON document that should be validated and parsed for + evaluation. Required. + :vartype arguments: str + """ + + name: str = rest_field() + """The name of the function. Required.""" + arguments: str = rest_field() + """The arguments to use when invoking the named function, as provided by the model. Arguments are + presented as a JSON document that should be validated and parsed for evaluation. Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunCompletionUsage(_model_base.Model): + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). + + + :ivar completion_tokens: Number of completion tokens used over the course of the run. Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """Number of completion tokens used over the course of the run. Required.""" + prompt_tokens: int = rest_field() + """Number of prompt tokens used over the course of the run. Required.""" + total_tokens: int = rest_field() + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunError(_model_base.Model): + """The details of an error as encountered by an agent thread run. + + + :ivar code: The status for the error. Required. + :vartype code: str + :ivar message: The human-readable text associated with the error. Required. + :vartype message: str + """ + + code: str = rest_field() + """The status for the error. Required.""" + message: str = rest_field() + """The human-readable text associated with the error. Required.""" + + @overload + def __init__( + self, + *, + code: str, + message: str, + ): ... 
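+
+    # Illustrative sketch only (not part of the generated surface): once a run
+    # reaches a terminal state, callers commonly surface errors and inspect usage.
+    # `run` is a hypothetical completed run object exposing these models:
+    #
+    #     if run.last_error is not None:
+    #         raise RuntimeError(f"{run.last_error.code}: {run.last_error.message}")
+    #     if run.usage is not None:
+    #         print("total tokens:", run.usage.total_tokens)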
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStep(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Detailed information about a single step of an agent thread run. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is + "thread.run.step". + :vartype object: str + :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. + Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.client.models.RunStepType + :ivar assistant_id: The ID of the agent associated with the run step. Required. + :vartype assistant_id: str + :ivar thread_id: The ID of the thread that was run. Required. + :vartype thread_id: str + :ivar run_id: The ID of the run that this run step is a part of. Required. + :vartype run_id: str + :ivar status: The status of this run step. Required. Known values are: "in_progress", + "cancelled", "failed", "completed", and "expired". + :vartype status: str or ~azure.ai.client.models.RunStepStatus + :ivar step_details: The details for this run step. Required. + :vartype step_details: ~azure.ai.client.models.RunStepDetails + :ivar last_error: If applicable, information about the last error encountered by this run step. + Required. + :vartype last_error: ~azure.ai.client.models.RunStepError + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. + Required. + :vartype expired_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the + run step's status is ``in_progress``. + :vartype usage: ~azure.ai.client.models.RunStepCompletionUsage + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step"] = rest_field() + """The object type, which is always 'thread.run.step'. Required. Default value is + \"thread.run.step\".""" + type: Union[str, "_models.RunStepType"] = rest_field() + """The type of run step, which can be either message_creation or tool_calls. Required. 
Known + values are: \"message_creation\" and \"tool_calls\".""" + assistant_id: str = rest_field() + """The ID of the agent associated with the run step. Required.""" + thread_id: str = rest_field() + """The ID of the thread that was run. Required.""" + run_id: str = rest_field() + """The ID of the run that this run step is a part of. Required.""" + status: Union[str, "_models.RunStepStatus"] = rest_field() + """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", + \"failed\", \"completed\", and \"expired\".""" + step_details: "_models.RunStepDetails" = rest_field() + """The details for this run step. Required.""" + last_error: "_models.RunStepError" = rest_field() + """If applicable, information about the last error encountered by this run step. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expired_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item expired. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this failed. Required.""" + usage: Optional["_models.RunStepCompletionUsage"] = rest_field() + """Usage statistics related to the run step. This value will be ``null`` while the run step's + status is ``in_progress``.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + type: Union[str, "_models.RunStepType"], + assistant_id: str, + thread_id: str, + run_id: str, + status: Union[str, "_models.RunStepStatus"], + step_details: "_models.RunStepDetails", + last_error: "_models.RunStepError", + created_at: datetime.datetime, + expired_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + metadata: Dict[str, str], + usage: Optional["_models.RunStepCompletionUsage"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run.step"] = "thread.run.step" + + +class RunStepCodeInterpreterToolCallOutput(_model_base.Model): + """An abstract representation of an emitted output from a code interpreter tool. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. 
Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): + """A representation of an image output emitted by a code interpreter tool in response to a tool + call by the model. + + + :ivar type: The object type, which is always 'image'. Required. Default value is "image". + :vartype type: str + :ivar image: Referential information for the image associated with this output. Required. + :vartype image: ~azure.ai.client.models.RunStepCodeInterpreterImageReference + """ + + type: Literal["image"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'image'. Required. Default value is \"image\".""" + image: "_models.RunStepCodeInterpreterImageReference" = rest_field() + """Referential information for the image associated with this output. Required.""" + + @overload + def __init__( + self, + *, + image: "_models.RunStepCodeInterpreterImageReference", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="image", **kwargs) + + +class RunStepCodeInterpreterImageReference(_model_base.Model): + """An image reference emitted by a code interpreter tool in response to a tool call by the model. + + + :ivar file_id: The ID of the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID of the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): + """A representation of a log output emitted by a code interpreter tool in response to a tool call + by the model. + + + :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". + :vartype type: str + :ivar logs: The serialized log output emitted by the code interpreter. Required. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'logs'. Required. Default value is \"logs\".""" + logs: str = rest_field() + """The serialized log output emitted by the code interpreter. Required.""" + + @overload + def __init__( + self, + *, + logs: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="logs", **kwargs) + + +class RunStepToolCall(_model_base.Model): + """An abstract representation of a detailed tool call as recorded within a run step for an + existing run. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepCodeInterpreterToolCall, RunStepFileSearchToolCall, RunStepFunctionToolCall + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + id: str = rest_field() + """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): + """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined + tool, that + represents inputs and outputs consumed and emitted by the code interpreter. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. + :vartype code_interpreter: ~azure.ai.client.models.RunStepCodeInterpreterToolCallDetails + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() + """The details of the tool call to the code interpreter tool. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepCodeInterpreterToolCallDetails(_model_base.Model): + """The detailed information about a code interpreter invocation by the model. + + + :ivar input: The input provided by the model to the code interpreter tool. Required. + :vartype input: str + :ivar outputs: The outputs produced by the code interpreter tool back to the model in response + to the tool call. Required. 
+ :vartype outputs: list[~azure.ai.client.models.RunStepCodeInterpreterToolCallOutput] + """ + + input: str = rest_field() + """The input provided by the model to the code interpreter tool. Required.""" + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() + """The outputs produced by the code interpreter tool back to the model in response to the tool + call. Required.""" + + @overload + def __init__( + self, + *, + input: str, + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepCompletionUsage(_model_base.Model): + """Usage statistics related to the run step. + + + :ivar completion_tokens: Number of completion tokens used over the course of the run step. + Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """Number of completion tokens used over the course of the run step. Required.""" + prompt_tokens: int = rest_field() + """Number of prompt tokens used over the course of the run step. Required.""" + total_tokens: int = rest_field() + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepDelta(_model_base.Model): + """Represents the delta payload in a streaming run step delta chunk. + + :ivar step_details: The details of the run step. + :vartype step_details: ~azure.ai.client.models.RunStepDeltaDetail + """ + + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() + """The details of the run step.""" + + @overload + def __init__( + self, + *, + step_details: Optional["_models.RunStepDeltaDetail"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepDeltaChunk(_model_base.Model): + """Represents a run step delta i.e. any changed fields on a run step during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default + value is "thread.run.step.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the run step. Required. 
+    :vartype delta: ~azure.ai.client.models.RunStepDelta
+    """
+
+    id: str = rest_field()
+    """The identifier of the run step, which can be referenced in API endpoints. Required."""
+    object: Literal["thread.run.step.delta"] = rest_field()
+    """The object type, which is always ``thread.run.step.delta``. Required. Default value is
+     \"thread.run.step.delta\"."""
+    delta: "_models.RunStepDelta" = rest_field()
+    """The delta containing the fields that have changed on the run step. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        delta: "_models.RunStepDelta",
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta"
+
+
+class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the Code Interpreter tool call data in a streaming run step's tool calls.
+
+    :ivar input: The input into the Code Interpreter tool call.
+    :vartype input: str
+    :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output
+     one or more items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these
+     is represented by a different object type.
+    :vartype outputs: list[~azure.ai.client.models.RunStepDeltaCodeInterpreterOutput]
+    """
+
+    input: Optional[str] = rest_field()
+    """The input into the Code Interpreter tool call."""
+    outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field()
+    """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more
+     items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these is represented
+     by a different object type."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        input: Optional[str] = None,
+        outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
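+
+
+# Illustrative sketch (editorial addition, not generator output): streamed Code
+# Interpreter detail items may each carry only a fragment of the tool call's
+# ``input``, so callers typically join the fragments in arrival order.
+# ``details`` is an assumed, already-collected list of delta detail items.
+def _example_join_code_interpreter_input(
+    details: List["RunStepDeltaCodeInterpreterDetailItemObject"],
+) -> str:
+    # ``input`` is optional on every delta, hence the ``or ""`` guard.
+    return "".join(detail.input or "" for detail in details)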
+class RunStepDeltaCodeInterpreterOutput(_model_base.Model):
+    """The abstract base representation of a streaming run step tool call's Code Interpreter tool
+    output.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required.
+     Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the output in the streaming run step tool call's Code Interpreter outputs
+     array. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the streaming run step tool call's Code Interpreter output. Required. Default
+     value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"):
+    """Represents an image output as produced by the Code Interpreter tool and as represented in a
+    streaming run step's delta tool calls collection.
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The object type, which is always 'image'. Required. Default value is "image".
+    :vartype type: str
+    :ivar image: The image data for the Code Interpreter tool call output.
+    :vartype image: ~azure.ai.client.models.RunStepDeltaCodeInterpreterImageOutputObject
+    """
+
+    type: Literal["image"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'image'. Required. Default value is \"image\"."""
+    image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field()
+    """The image data for the Code Interpreter tool call output."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="image", **kwargs)
+
+
+class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the data for a streaming run step's Code Interpreter tool call image output.
+
+    :ivar file_id: The file ID for the image.
+    :vartype file_id: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The file ID for the image."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
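+
+
+# Illustrative sketch (editorial addition, not generator output): a streamed
+# Code Interpreter output arrives as one of the discriminated subclasses, so a
+# renderer can dispatch on the concrete type (the ``logs`` subclass is declared
+# just below this point in the module; names resolve at call time).
+def _example_render_code_interpreter_output(output: "RunStepDeltaCodeInterpreterOutput") -> str:
+    if isinstance(output, RunStepDeltaCodeInterpreterLogOutput):
+        return output.logs or ""
+    if isinstance(output, RunStepDeltaCodeInterpreterImageOutput):
+        # Image outputs reference an uploaded file instead of inline bytes.
+        file_id = output.image.file_id if output.image else None
+        return "[image file_id={}]".format(file_id)
+    return "[unhandled output type {!r}]".format(output.type)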
+class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"):
+    """Represents a log output as produced by the Code Interpreter tool and as represented in a
+    streaming run step's delta tool calls collection.
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The type of the object, which is always 'logs'. Required. Default value is "logs".
+    :vartype type: str
+    :ivar logs: The text output from the Code Interpreter tool call.
+    :vartype logs: str
+    """
+
+    type: Literal["logs"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the object, which is always 'logs'. Required. Default value is \"logs\"."""
+    logs: Optional[str] = rest_field()
+    """The text output from the Code Interpreter tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        logs: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="logs", **kwargs)
+
+
+class RunStepDeltaToolCall(_model_base.Model):
+    """The abstract base representation of a single tool call within a streaming run step's delta
+    tool call details.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall,
+    RunStepDeltaFunctionToolCall
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The type of the tool call detail item in a streaming run step's details. Required.
+     Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the tool call detail in the run step's tool_calls array. Required."""
+    id: str = rest_field()
+    """The ID of the tool call, used when submitting outputs to the run. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the tool call detail item in a streaming run step's details. Required. Default
+     value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
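+
+
+# Illustrative sketch (editorial addition, not generator output): streamed tool
+# call deltas are positional, so fragments that share an ``index`` describe the
+# same tool call and are usually bucketed before being merged.
+def _example_bucket_tool_call_deltas(
+    deltas: List["RunStepDeltaToolCall"],
+) -> Dict[int, List["RunStepDeltaToolCall"]]:
+    buckets: Dict[int, List["RunStepDeltaToolCall"]] = {}
+    for delta in deltas:
+        buckets.setdefault(delta.index, []).append(delta)
+    return buckets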
+class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"):
+    """Represents a Code Interpreter tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is
+     "code_interpreter".
+    :vartype type: str
+    :ivar code_interpreter: The Code Interpreter data for the tool call.
+    :vartype code_interpreter: ~azure.ai.client.models.RunStepDeltaCodeInterpreterDetailItemObject
+    """
+
+    type: Literal["code_interpreter"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'code_interpreter'. Required. Default value is
+     \"code_interpreter\"."""
+    code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field()
+    """The Code Interpreter data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="code_interpreter", **kwargs)
+
+
+class RunStepDeltaDetail(_model_base.Model):
+    """Represents a single run step detail item in a streaming run step's delta payload.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaMessageCreation, RunStepDeltaToolCallObject
+
+
+    :ivar type: The object type for the run step detail object. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the run step detail object. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"):
+    """Represents a file search tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'file_search'. Required. Default value is
+     "file_search".
+    :vartype type: str
+    :ivar file_search: Reserved for future use.
+    :vartype file_search: dict[str, str]
+    """
+
+    type: Literal["file_search"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'file_search'. Required. Default value is \"file_search\"."""
+    file_search: Optional[Dict[str, str]] = rest_field()
+    """Reserved for future use."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        file_search: Optional[Dict[str, str]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="file_search", **kwargs)
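+
+
+# Illustrative sketch (editorial addition, not generator output): a run step
+# delta's ``step_details`` is one of the RunStepDeltaDetail subclasses; only the
+# ``tool_calls`` variant (RunStepDeltaToolCallObject, declared later in this
+# module; the name resolves at call time) carries a tool call collection.
+def _example_delta_tool_calls(detail: "RunStepDeltaDetail") -> List["RunStepDeltaToolCall"]:
+    if isinstance(detail, RunStepDeltaToolCallObject):
+        return detail.tool_calls or []
+    return []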
+class RunStepDeltaFunction(_model_base.Model):
+    """Represents the function data in a streaming run step delta's function tool call.
+
+    :ivar name: The name of the function.
+    :vartype name: str
+    :ivar arguments: The arguments passed to the function as input.
+    :vartype arguments: str
+    :ivar output: The output of the function, null if outputs have not yet been submitted.
+    :vartype output: str
+    """
+
+    name: Optional[str] = rest_field()
+    """The name of the function."""
+    arguments: Optional[str] = rest_field()
+    """The arguments passed to the function as input."""
+    output: Optional[str] = rest_field()
+    """The output of the function, null if outputs have not yet been submitted."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        arguments: Optional[str] = None,
+        output: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"):
+    """Represents a function tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'function'. Required. Default value is
+     "function".
+    :vartype type: str
+    :ivar function: The function data for the tool call.
+    :vartype function: ~azure.ai.client.models.RunStepDeltaFunction
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'function'. Required. Default value is \"function\"."""
+    function: Optional["_models.RunStepDeltaFunction"] = rest_field()
+    """The function data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        function: Optional["_models.RunStepDeltaFunction"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="function", **kwargs)
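+
+
+# Illustrative sketch (editorial addition, not generator output): function
+# arguments stream as string fragments, so callers concatenate them and parse
+# the JSON only once the run step completes. ``calls`` is an assumed list of
+# deltas already bucketed for a single tool call.
+def _example_join_function_arguments(calls: List["RunStepDeltaFunctionToolCall"]) -> str:
+    return "".join(call.function.arguments or "" for call in calls if call.function is not None)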
+class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"):
+    """Represents a message creation within a streaming run step delta.
+
+
+    :ivar type: The object type, which is always 'message_creation'. Required. Default value is
+     "message_creation".
+    :vartype type: str
+    :ivar message_creation: The message creation data.
+    :vartype message_creation: ~azure.ai.client.models.RunStepDeltaMessageCreationObject
+    """
+
+    type: Literal["message_creation"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'message_creation'. Required. Default value is
+     \"message_creation\"."""
+    message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field()
+    """The message creation data."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="message_creation", **kwargs)
+
+
+class RunStepDeltaMessageCreationObject(_model_base.Model):
+    """Represents the data within a streaming run step message creation response object.
+
+    :ivar message_id: The ID of the newly-created message.
+    :vartype message_id: str
+    """
+
+    message_id: Optional[str] = rest_field()
+    """The ID of the newly-created message."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_id: Optional[str] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"):
+    """Represents an invocation of tool calls as part of a streaming run step.
+
+
+    :ivar type: The object type, which is always 'tool_calls'. Required. Default value is
+     "tool_calls".
+    :vartype type: str
+    :ivar tool_calls: The collection of tool calls for the tool call detail item.
+    :vartype tool_calls: list[~azure.ai.client.models.RunStepDeltaToolCall]
+    """
+
+    type: Literal["tool_calls"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'tool_calls'. Required. Default value is \"tool_calls\"."""
+    tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field()
+    """The collection of tool calls for the tool call detail item."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="tool_calls", **kwargs)
+
+
+class RunStepDetails(_model_base.Model):
+    """An abstract representation of the details for a run step.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepMessageCreationDetails, RunStepToolCallDetails
+
+
+    :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls".
+    :vartype type: str or ~azure.ai.client.models.RunStepType
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\"."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
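+
+
+# Illustrative sketch (editorial addition, not generator output): summarizing a
+# completed run step by dispatching on the concrete RunStepDetails subclass
+# (both subclasses are declared later in this module; names resolve at call
+# time).
+def _example_summarize_step_details(details: "RunStepDetails") -> str:
+    if isinstance(details, RunStepMessageCreationDetails):
+        return "created message " + details.message_creation.message_id
+    if isinstance(details, RunStepToolCallDetails):
+        return "made {} tool call(s)".format(len(details.tool_calls))
+    return "unrecognized step type {!r}".format(details.type)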
+class RunStepError(_model_base.Model):
+    """The error information associated with a failed run step.
+
+
+    :ivar code: The error code for this error. Required. Known values are: "server_error" and
+     "rate_limit_exceeded".
+    :vartype code: str or ~azure.ai.client.models.RunStepErrorCode
+    :ivar message: The human-readable text associated with this error. Required.
+    :vartype message: str
+    """
+
+    code: Union[str, "_models.RunStepErrorCode"] = rest_field()
+    """The error code for this error. Required. Known values are: \"server_error\" and
+     \"rate_limit_exceeded\"."""
+    message: str = rest_field()
+    """The human-readable text associated with this error. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        code: Union[str, "_models.RunStepErrorCode"],
+        message: str,
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, **kwargs)
+
+
+class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"):
+    """A record of a call to a file search tool, issued by the model in evaluation of a defined
+    tool, that represents an executed file search.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'file_search'. Required. Default value is
+     "file_search".
+    :vartype type: str
+    :ivar file_search: Reserved for future use. Required.
+    :vartype file_search: dict[str, str]
+    """
+
+    type: Literal["file_search"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'file_search'. Required. Default value is \"file_search\"."""
+    file_search: Dict[str, str] = rest_field()
+    """Reserved for future use. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        file_search: Dict[str, str],
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="file_search", **kwargs)
+
+
+class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"):
+    """A record of a call to a function tool, issued by the model in evaluation of a defined tool,
+    that represents the inputs and output consumed and emitted by the specified function.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'function'. Required. Default value is
+     "function".
+    :vartype type: str
+    :ivar function: The detailed information about the function called by the model. Required.
+    :vartype function: ~azure.ai.client.models.RunStepFunctionToolCallDetails
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'function'. Required. Default value is \"function\"."""
+    function: "_models.RunStepFunctionToolCallDetails" = rest_field()
+    """The detailed information about the function called by the model. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.RunStepFunctionToolCallDetails",
+    ): ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]):
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
+        super().__init__(*args, type="function", **kwargs)
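+
+
+# Illustrative sketch (editorial addition, not generator output): the
+# ``arguments`` field of a function tool call is a JSON-encoded string, so
+# callers decode it before invoking their own implementation of the function.
+def _example_decode_function_arguments(call: "RunStepFunctionToolCall") -> Dict[str, Any]:
+    import json  # local import; ``json`` is not otherwise required by this module
+
+    return json.loads(call.function.arguments or "{}")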
Required.""" + arguments: str = rest_field() + """The arguments that the model requires are provided to the named function. Required.""" + output: str = rest_field() + """The output of the function, only populated for function calls that have already have had their + outputs submitted. Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + output: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"): + """The detailed information associated with a message creation run step. + + + :ivar type: The object type, which is always 'message_creation'. Required. Represents a run + step to create a message. + :vartype type: str or ~azure.ai.client.models.MESSAGE_CREATION + :ivar message_creation: Information about the message creation associated with this run step. + Required. + :vartype message_creation: ~azure.ai.client.models.RunStepMessageCreationReference + """ + + type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'message_creation'. Required. Represents a run step to create + a message.""" + message_creation: "_models.RunStepMessageCreationReference" = rest_field() + """Information about the message creation associated with this run step. Required.""" + + @overload + def __init__( + self, + *, + message_creation: "_models.RunStepMessageCreationReference", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs) + + +class RunStepMessageCreationReference(_model_base.Model): + """The details of a message created as a part of a run step. + + + :ivar message_id: The ID of the message created by this run step. Required. + :vartype message_id: str + """ + + message_id: str = rest_field() + """The ID of the message created by this run step. Required.""" + + @overload + def __init__( + self, + *, + message_id: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): + """The detailed information associated with a run step calling tools. + + + :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that + calls tools. + :vartype type: str or ~azure.ai.client.models.TOOL_CALLS + :ivar tool_calls: A list of tool call details for this run step. Required. + :vartype tool_calls: list[~azure.ai.client.models.RunStepToolCall] + """ + + type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'tool_calls'. Required. 
Represents a run step that calls + tools.""" + tool_calls: List["_models.RunStepToolCall"] = rest_field() + """A list of tool call details for this run step. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RunStepToolCall"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) + + +class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): + """The details for required tool calls that must be submitted for an agent thread run to continue. + + + :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is + "submit_tool_outputs". + :vartype type: str + :ivar submit_tool_outputs: The details describing tools that should be called to submit tool + outputs. Required. + :vartype submit_tool_outputs: ~azure.ai.client.models.SubmitToolOutputsDetails + """ + + type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'submit_tool_outputs'. Required. Default value is + \"submit_tool_outputs\".""" + submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() + """The details describing tools that should be called to submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + submit_tool_outputs: "_models.SubmitToolOutputsDetails", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="submit_tool_outputs", **kwargs) + + +class SubmitToolOutputsDetails(_model_base.Model): + """The details describing tools that should be called to submit tool outputs. + + + :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to + continue. Required. + :vartype tool_calls: list[~azure.ai.client.models.RequiredToolCall] + """ + + tool_calls: List["_models.RequiredToolCall"] = rest_field() + """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RequiredToolCall"], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class SystemData(_model_base.Model): + """Metadata pertaining to creation and last modification of the resource. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar created_at: The timestamp the resource was created at. + :vartype created_at: ~datetime.datetime + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The identity type that created the resource. + :vartype created_by_type: str + :ivar last_modified_at: The timestamp of resource last modification (UTC). 
+ :vartype last_modified_at: ~datetime.datetime + """ + + created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") + """The timestamp the resource was created at.""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The identity that created the resource.""" + created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) + """The identity type that created the resource.""" + last_modified_at: Optional[datetime.datetime] = rest_field( + name="lastModifiedAt", visibility=["read"], format="rfc3339" + ) + """The timestamp of resource last modification (UTC).""" + + +class ThreadDeletionStatus(_model_base.Model): + """The status of a thread deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is + "thread.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["thread.deleted"] = rest_field() + """The object type, which is always 'thread.deleted'. Required. Default value is + \"thread.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.deleted"] = "thread.deleted" + + +class ThreadMessage(_model_base.Model): # pylint: disable=too-many-instance-attributes + """A single, existing message within an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.message'. Required. Default value is + "thread.message". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar thread_id: The ID of the thread that this message belongs to. Required. + :vartype thread_id: str + :ivar status: The status of the message. Required. Known values are: "in_progress", + "incomplete", and "completed". + :vartype status: str or ~azure.ai.client.models.MessageStatus + :ivar incomplete_details: On an incomplete message, details about why the message is + incomplete. Required. + :vartype incomplete_details: ~azure.ai.client.models.MessageIncompleteDetails + :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. + Required. + :vartype completed_at: ~datetime.datetime + :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as + incomplete. Required. + :vartype incomplete_at: ~datetime.datetime + :ivar role: The role associated with the agent thread message. Required. Known values are: + "user" and "assistant". 
+ :vartype role: str or ~azure.ai.client.models.MessageRole + :ivar content: The list of content items associated with the agent thread message. Required. + :vartype content: list[~azure.ai.client.models.MessageContent] + :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. + :vartype assistant_id: str + :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. + Required. + :vartype run_id: str + :ivar attachments: A list of files attached to the message, and the tools they were added to. + Required. + :vartype attachments: list[~azure.ai.client.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message"] = rest_field() + """The object type, which is always 'thread.message'. Required. Default value is + \"thread.message\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + thread_id: str = rest_field() + """The ID of the thread that this message belongs to. Required.""" + status: Union[str, "_models.MessageStatus"] = rest_field() + """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and + \"completed\".""" + incomplete_details: "_models.MessageIncompleteDetails" = rest_field() + """On an incomplete message, details about why the message is incomplete. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was completed. Required.""" + incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" + role: Union[str, "_models.MessageRole"] = rest_field() + """The role associated with the agent thread message. Required. Known values are: \"user\" and + \"assistant\".""" + content: List["_models.MessageContent"] = rest_field() + """The list of content items associated with the agent thread message. Required.""" + assistant_id: str = rest_field() + """If applicable, the ID of the agent that authored this message. Required.""" + run_id: str = rest_field() + """If applicable, the ID of the run associated with the authoring of this message. Required.""" + attachments: List["_models.MessageAttachment"] = rest_field() + """A list of files attached to the message, and the tools they were added to. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + thread_id: str, + status: Union[str, "_models.MessageStatus"], + incomplete_details: "_models.MessageIncompleteDetails", + completed_at: datetime.datetime, + incomplete_at: datetime.datetime, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageContent"], + assistant_id: str, + run_id: str, + attachments: List["_models.MessageAttachment"], + metadata: Dict[str, str], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message"] = "thread.message" + + +class ThreadMessageOptions(_model_base.Model): + """A single message within an agent thread, as provided during that thread's creation for its + initial state. + + All required parameters must be populated in order to send to server. + + :ivar role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: "user" and "assistant". + :vartype role: str or ~azure.ai.client.models.MessageRole + :ivar content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :vartype content: str + :ivar attachments: A list of files attached to the message, and the tools they should be added + to. + :vartype attachments: list[~azure.ai.client.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The role of the entity that is creating the message. Allowed values include: + + + * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases + to represent user-generated messages. + * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: \"user\" and \"assistant\".""" + content: str = rest_field() + """The textual content of the initial message. Currently, robust input including images and + annotated text may only be provided via + a separate call to the create message API. Required.""" + attachments: Optional[List["_models.MessageAttachment"]] = rest_field() + """A list of files attached to the message, and the tools they should be added to.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: str, + attachments: Optional[List["_models.MessageAttachment"]] = None, + metadata: Optional[Dict[str, str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ThreadRun(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Data representing a single evaluation run of an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run'. Required. Default value is + "thread.run". + :vartype object: str + :ivar thread_id: The ID of the thread associated with this run. Required. + :vartype thread_id: str + :ivar assistant_id: The ID of the agent associated with the thread this run was performed + against. Required. + :vartype assistant_id: str + :ivar status: The status of the agent thread run. Required. Known values are: "queued", + "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and + "expired". + :vartype status: str or ~azure.ai.client.models.RunStatus + :ivar required_action: The details of the action required for the agent thread run to continue. + :vartype required_action: ~azure.ai.client.models.RequiredAction + :ivar last_error: The last error, if any, encountered by this agent thread run. Required. + :vartype last_error: ~azure.ai.client.models.RunError + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The overridden system instructions used for this agent thread run. + Required. + :vartype instructions: str + :ivar tools: The overridden enabled tools used for this agent thread run. Required. + :vartype tools: list[~azure.ai.client.models.ToolDefinition] + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. + Required. + :vartype expires_at: ~datetime.datetime + :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. + Required. + :vartype started_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is + not incomplete. Required. Known values are: "max_completion_tokens" and "max_prompt_tokens". + :vartype incomplete_details: str or ~azure.ai.client.models.IncompleteRunDetails + :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not + in a terminal state (i.e. 
``in_progress``\\ , ``queued``\\ , etc.). Required.
+    :vartype usage: ~azure.ai.client.models.RunCompletionUsage
+    :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1.
+    :vartype temperature: float
+    :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1.
+    :vartype top_p: float
+    :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over
+     the course of the run. Required.
+    :vartype max_prompt_tokens: int
+    :ivar max_completion_tokens: The maximum number of completion tokens specified to have been
+     used over the course of the run. Required.
+    :vartype max_completion_tokens: int
+    :ivar truncation_strategy: The strategy to use for dropping messages as the context window
+     moves forward. Required.
+    :vartype truncation_strategy: ~azure.ai.client.models.TruncationObject
+    :ivar tool_choice: Controls whether (and which) tool is called by the model. Required. Is
+     one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+     AgentsNamedToolChoice
+    :vartype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+     ~azure.ai.client.models.AgentsNamedToolChoice
+    :ivar response_format: The response format of the tool calls used in this run. Required. Is
+     one of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+     AgentsApiResponseFormat
+    :vartype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode or
+     ~azure.ai.client.models.AgentsApiResponseFormat
+    :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
+     storing additional information about that object in a structured format. Keys may be up to 64
+     characters in length and values may be up to 512 characters in length. Required.
+    :vartype metadata: dict[str, str]
+    :ivar tool_resources: Override the tools the agent can use for this run. This is useful for
+     modifying the behavior on a per-run basis.
+    :vartype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions
+    :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run.
+    :vartype parallel_tool_calls: bool
+    """
+
+    id: str = rest_field()
+    """The identifier, which can be referenced in API endpoints. Required."""
+    object: Literal["thread.run"] = rest_field()
+    """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\"."""
+    thread_id: str = rest_field()
+    """The ID of the thread associated with this run. Required."""
+    assistant_id: str = rest_field()
+    """The ID of the agent associated with the thread this run was performed against. Required."""
+    status: Union[str, "_models.RunStatus"] = rest_field()
+    """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\",
+     \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\"."""
+    required_action: Optional["_models.RequiredAction"] = rest_field()
+    """The details of the action required for the agent thread run to continue."""
+    last_error: "_models.RunError" = rest_field()
+    """The last error, if any, encountered by this agent thread run. Required."""
+    model: str = rest_field()
+    """The ID of the model to use. Required."""
+    instructions: str = rest_field()
+    """The overridden system instructions used for this agent thread run. Required."""
Required.""" + tools: List["_models.ToolDefinition"] = rest_field() + """The overridden enabled tools used for this agent thread run. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expires_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item expires. Required.""" + started_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item was started. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this failed. Required.""" + incomplete_details: Union[str, "_models.IncompleteRunDetails"] = rest_field() + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required. + Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\".""" + usage: "_models.RunCompletionUsage" = rest_field() + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" + temperature: Optional[float] = rest_field() + """The sampling temperature used for this run. If not set, defaults to 1.""" + top_p: Optional[float] = rest_field() + """The nucleus sampling value used for this run. If not set, defaults to 1.""" + max_prompt_tokens: int = rest_field() + """The maximum number of prompt tokens specified to have been used over the course of the run. + Required.""" + max_completion_tokens: int = rest_field() + """The maximum number of completion tokens specified to have been used over the course of the run. + Required.""" + truncation_strategy: "_models.TruncationObject" = rest_field() + """The strategy to use for dropping messages as the context windows moves forward. Required.""" + tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field() + """Controls whether or not and which tool is called by the model. Required. Is one of the + following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], + AgentsNamedToolChoice""" + response_format: "_types.AgentsApiResponseFormatOption" = rest_field() + """The response format of the tool calls used in this run. Required. Is one of the following + types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field() + """Override the tools the agent can use for this run. 
This is useful for modifying the behavior on + a per-run basis.""" + parallel_tool_calls: Optional[bool] = rest_field(name="parallelToolCalls") + """Determines if tools can be executed in parallel within the run.""" + + @overload + def __init__( # pylint: disable=too-many-locals + self, + *, + id: str, # pylint: disable=redefined-builtin + thread_id: str, + assistant_id: str, + status: Union[str, "_models.RunStatus"], + last_error: "_models.RunError", + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + created_at: datetime.datetime, + expires_at: datetime.datetime, + started_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + incomplete_details: Union[str, "_models.IncompleteRunDetails"], + usage: "_models.RunCompletionUsage", + max_prompt_tokens: int, + max_completion_tokens: int, + truncation_strategy: "_models.TruncationObject", + tool_choice: "_types.AgentsApiToolChoiceOption", + response_format: "_types.AgentsApiResponseFormatOption", + metadata: Dict[str, str], + required_action: Optional["_models.RequiredAction"] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None, + parallel_tool_calls: Optional[bool] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run"] = "thread.run" + + +class ToolOutput(_model_base.Model): + """The data provided during a tool outputs submission to resolve pending tool calls and allow the + model to continue. + + :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a + required action from a run. + :vartype tool_call_id: str + :ivar output: The output from the tool to be submitted. + :vartype output: str + """ + + tool_call_id: Optional[str] = rest_field() + """The ID of the tool call being resolved, as provided in the tool calls of a required action from + a run.""" + output: Optional[str] = rest_field() + """The output from the tool to be submitted.""" + + @overload + def __init__( + self, + *, + tool_call_id: Optional[str] = None, + output: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ToolResources(_model_base.Model): + """A set of resources that are used by the agent's tools. The resources are specific to the type + of + tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` + tool requires a list of vector store IDs. + + :ivar code_interpreter: Resources to be used by the ``code_interpreter tool`` consisting of + file IDs. + :vartype code_interpreter: ~azure.ai.client.models.CodeInterpreterToolResource + :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store + IDs. 
+ :vartype file_search: ~azure.ai.client.models.FileSearchToolResource + """ + + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field() + """Resources to be used by the ``code_interpreter`` tool consisting of file IDs.""" + file_search: Optional["_models.FileSearchToolResource"] = rest_field() + """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, + file_search: Optional["_models.FileSearchToolResource"] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class TruncationObject(_model_base.Model): + """Controls for how a thread will be truncated prior to the run. Use this to control the initial + context window of the run. + + + :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to + ``last_messages``\\ , the thread will + be truncated to the ``last_messages`` count most recent messages in the thread. When set to + ``auto``\\ , messages in the middle of the thread + will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known + values are: "auto" and "last_messages". + :vartype type: str or ~azure.ai.client.models.TruncationStrategy + :ivar last_messages: The number of most recent messages from the thread when constructing the + context for the run. + :vartype last_messages: int + """ + + type: Union[str, "_models.TruncationStrategy"] = rest_field() + """The truncation strategy to use for the thread. The default is ``auto``. If set to + ``last_messages``\ , the thread will + be truncated to the ``last_messages`` count most recent messages in the thread. When set to + ``auto``\ , messages in the middle of the thread + will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known + values are: \"auto\" and \"last_messages\".""" + last_messages: Optional[int] = rest_field() + """The number of most recent messages from the thread when constructing the context for the run.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.TruncationStrategy"], + last_messages: Optional[int] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UpdateCodeInterpreterToolResourceOptions(_model_base.Model): + """Request object to update ``code_interpreter`` tool resources. + + :ivar file_ids: A list of file IDs to override the current list of the agent. + :vartype file_ids: list[str] + """ + + file_ids: Optional[List[str]] = rest_field() + """A list of file IDs to override the current list of the agent.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UpdateEvaluationRequest(_model_base.Model): + """Update Evaluation Request. + + All required parameters must be populated in order to send to server. + + :ivar tags: Tags to be updated. Required. + :vartype tags: dict[str, str] + :ivar display_name: Display Name. Required. + :vartype display_name: str + :ivar description: Description. Required. + :vartype description: str + """ + + tags: Dict[str, str] = rest_field() + """Tags to be updated. Required.""" + display_name: str = rest_field(name="displayName") + """Display Name. Required.""" + description: str = rest_field() + """Description. Required.""" + + @overload + def __init__( + self, + *, + tags: Dict[str, str], + display_name: str, + description: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UpdateFileSearchToolResourceOptions(_model_base.Model): + """Request object to update ``file_search`` tool resources. + + :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent. + :vartype vector_store_ids: list[str] + """ + + vector_store_ids: Optional[List[str]] = rest_field() + """A list of vector store IDs to override the current list of the agent.""" + + @overload + def __init__( + self, + *, + vector_store_ids: Optional[List[str]] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class UpdateToolResourcesOptions(_model_base.Model): + """Request object. A set of resources that are used by the agent's tools. The resources are + specific to the type of tool. + For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list of + vector store IDs. + + :ivar code_interpreter: Overrides the list of file IDs made available to the + ``code_interpreter`` tool. There can be a maximum of 20 files + associated with the tool. + :vartype code_interpreter: ~azure.ai.client.models.UpdateCodeInterpreterToolResourceOptions + :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of + 1 vector store attached to the agent. + :vartype file_search: ~azure.ai.client.models.UpdateFileSearchToolResourceOptions + """ + + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field() + """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a + maximum of 20 files + associated with the tool.""" + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() + """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store + attached to the agent.""" + + @overload + def __init__( + self, + *, + code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, + file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, + ): ... 
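# Editorial sketch (not part of the patch): how the per-run override models above
# might compose, assuming they are exported from azure.ai.client.models as the
# :vartype references suggest. The IDs are hypothetical placeholders.
from azure.ai.client.models import (
    UpdateCodeInterpreterToolResourceOptions,
    UpdateFileSearchToolResourceOptions,
    UpdateToolResourcesOptions,
)

overrides = UpdateToolResourcesOptions(
    code_interpreter=UpdateCodeInterpreterToolResourceOptions(file_ids=["file-abc"]),  # hypothetical file ID
    file_search=UpdateFileSearchToolResourceOptions(vector_store_ids=["vs-123"]),  # hypothetical vector store ID
)
# The resulting object would be passed wherever a run accepts tool_resources overrides.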
+ + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStore(_model_base.Model): # pylint: disable=too-many-instance-attributes + """A vector store is a collection of processed files can be used by the ``file_search`` tool. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store``. Required. Default value is + "vector_store". + :vartype object: str + :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the vector store. Required. + :vartype name: str + :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required. + :vartype usage_bytes: int + :ivar file_counts: Files count grouped by status processed or being processed by this vector + store. Required. + :vartype file_counts: ~azure.ai.client.models.VectorStoreFileCount + :ivar status: The status of the vector store, which can be either ``expired``\\ , + ``in_progress``\\ , or ``completed``. A status of ``completed`` indicates that the vector store + is ready for use. Required. Known values are: "expired", "in_progress", and "completed". + :vartype status: str or ~azure.ai.client.models.VectorStoreStatus + :ivar expires_after: Details on when this vector store expires. + :vartype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy + :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire. + :vartype expires_at: ~datetime.datetime + :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last + active. Required. + :vartype last_active_at: ~datetime.datetime + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store"] = rest_field() + """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store was created. Required.""" + name: str = rest_field() + """The name of the vector store. Required.""" + usage_bytes: int = rest_field() + """The total number of bytes used by the files in the vector store. Required.""" + file_counts: "_models.VectorStoreFileCount" = rest_field() + """Files count grouped by status processed or being processed by this vector store. Required.""" + status: Union[str, "_models.VectorStoreStatus"] = rest_field() + """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or + ``completed``. A status of ``completed`` indicates that the vector store is ready for use. + Required. 
Known values are: \"expired\", \"in_progress\", and \"completed\".""" + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() + """Details on when this vector store expires.""" + expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store will expire.""" + last_active_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store was last active. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + usage_bytes: int, + file_counts: "_models.VectorStoreFileCount", + status: Union[str, "_models.VectorStoreStatus"], + last_active_at: datetime.datetime, + metadata: Dict[str, str], + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, + expires_at: Optional[datetime.datetime] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store"] = "vector_store" + + +class VectorStoreChunkingStrategyRequest(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest + + All required parameters must be populated in order to send to server. + + :ivar type: The object type. Required. Known values are: "auto" and "static". + :vartype type: str or ~azure.ai.client.models.VectorStoreChunkingStrategyRequestType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"auto\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): + """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and + chunk_overlap_tokens of 400. + + All required parameters must be populated in order to send to server. + + :ivar type: The object type, which is always 'auto'. Required. + :vartype type: str or ~azure.ai.client.models.AUTO + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'auto'. Required.""" + + @overload + def __init__( + self, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) + + +class VectorStoreChunkingStrategyResponse(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse + + + :ivar type: The object type. Required. Known values are: "other" and "static". + :vartype type: str or ~azure.ai.client.models.VectorStoreChunkingStrategyResponseType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"other\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"): + """This is returned when the chunking strategy is unknown. Typically, this is because the file was + indexed before the chunking_strategy concept was introduced in the API. + + + :ivar type: The object type, which is always 'other'. Required. + :vartype type: str or ~azure.ai.client.models.OTHER + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'other'. Required.""" + + @overload + def __init__( + self, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs) + + +class VectorStoreDeletionStatus(_model_base.Model): + """Response object for deleting a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value + is "vector_store.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["vector_store.deleted"] = rest_field() + """The object type, which is always 'vector_store.deleted'. Required. Default value is + \"vector_store.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.deleted"] = "vector_store.deleted" + + +class VectorStoreExpirationPolicy(_model_base.Model): + """The expiration policy for a vector store. + + + :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors: + ``last_active_at``. Required. "last_active_at" + :vartype anchor: str or ~azure.ai.client.models.VectorStoreExpirationPolicyAnchor + :ivar days: The number of days after the anchor timestamp that the vector store will expire. + Required. + :vartype days: int + """ + + anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field() + """Anchor timestamp after which the expiration policy applies. Supported anchors: + ``last_active_at``. Required. \"last_active_at\"""" + days: int = rest_field() + """The number of days after the anchor timestamp that the vector store will expire. Required.""" + + @overload + def __init__( + self, + *, + anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"], + days: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreFile(_model_base.Model): + """Description of a file attached to a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file``. Required. Default value + is "vector_store.file". + :vartype object: str + :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from + the original file + size. Required. + :vartype usage_bytes: int + :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store file, which can be either ``in_progress``\\ , + ``completed``\\ , ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the + vector store file is ready for use. Required. Known values are: "in_progress", "completed", + "failed", and "cancelled". + :vartype status: str or ~azure.ai.client.models.VectorStoreFileStatus + :ivar last_error: The last error associated with this vector store file. Will be ``null`` if + there are no errors. Required. + :vartype last_error: ~azure.ai.client.models.VectorStoreFileError + :ivar chunking_strategy: The strategy used to chunk the file. Required. + :vartype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyResponse + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.file"] = rest_field() + """The object type, which is always ``vector_store.file``. Required. Default value is + \"vector_store.file\".""" + usage_bytes: int = rest_field() + """The total vector store usage in bytes. Note that this may be different from the original file + size.
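# Editorial sketch (not part of the patch): a policy that expires a vector store
# seven days after it was last active, assuming the model is exported from
# azure.ai.client.models.
from azure.ai.client.models import VectorStoreExpirationPolicy

policy = VectorStoreExpirationPolicy(anchor="last_active_at", days=7)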
Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" + vector_store_id: str = rest_field() + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() + """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , + ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file + is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and + \"cancelled\".""" + last_error: "_models.VectorStoreFileError" = rest_field() + """The last error associated with this vector store file. Will be ``null`` if there are no errors. + Required.""" + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() + """The strategy used to chunk the file. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + usage_bytes: int, + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileStatus"], + last_error: "_models.VectorStoreFileError", + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.file"] = "vector_store.file" + + +class VectorStoreFileBatch(_model_base.Model): + """A batch of files attached to a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file_batch``. Required. Default + value is "vector_store.files_batch". + :vartype object: str + :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was + created. Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store files batch, which can be either ``in_progress``\\ + , ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: "in_progress", + "completed", "cancelled", and "failed". + :vartype status: str or ~azure.ai.client.models.VectorStoreFileBatchStatus + :ivar file_counts: Files count grouped by status processed or being processed by this vector + store. Required. + :vartype file_counts: ~azure.ai.client.models.VectorStoreFileCount + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.files_batch"] = rest_field() + """The object type, which is always ``vector_store.file_batch``. Required. Default value is + \"vector_store.files_batch\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" + vector_store_id: str = rest_field() + """The ID of the vector store that the file is attached to. 
Required.""" + status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() + """The status of the vector store files batch, which can be either ``in_progress``\ , + ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + \"completed\", \"cancelled\", and \"failed\".""" + file_counts: "_models.VectorStoreFileCount" = rest_field() + """Files count grouped by status processed or being processed by this vector store. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileBatchStatus"], + file_counts: "_models.VectorStoreFileCount", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch" + + +class VectorStoreFileCount(_model_base.Model): + """Counts of files processed or being processed by this vector store grouped by status. + + + :ivar in_progress: The number of files that are currently being processed. Required. + :vartype in_progress: int + :ivar completed: The number of files that have been successfully processed. Required. + :vartype completed: int + :ivar failed: The number of files that have failed to process. Required. + :vartype failed: int + :ivar cancelled: The number of files that were cancelled. Required. + :vartype cancelled: int + :ivar total: The total number of files. Required. + :vartype total: int + """ + + in_progress: int = rest_field() + """The number of files that are currently being processed. Required.""" + completed: int = rest_field() + """The number of files that have been successfully processed. Required.""" + failed: int = rest_field() + """The number of files that have failed to process. Required.""" + cancelled: int = rest_field() + """The number of files that were cancelled. Required.""" + total: int = rest_field() + """The total number of files. Required.""" + + @overload + def __init__( + self, + *, + in_progress: int, + completed: int, + failed: int, + cancelled: int, + total: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreFileDeletionStatus(_model_base.Model): + """Response object for deleting a vector store file relationship. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value + is "vector_store.file.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["vector_store.file.deleted"] = rest_field() + """The object type, which is always 'vector_store.deleted'. 
Required. Default value is + \"vector_store.file.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted" + + +class VectorStoreFileError(_model_base.Model): + """Details on the error that may have occurred while processing a file for this vector store. + + + :ivar code: One of ``server_error`` or ``rate_limit_exceeded``. Required. Known values are: + "internal_error", "file_not_found", "parsing_error", and "unhandled_mime_type". + :vartype code: str or ~azure.ai.client.models.VectorStoreFileErrorCode + :ivar message: A human-readable description of the error. Required. + :vartype message: str + """ + + code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field() + """One of ``server_error`` or ``rate_limit_exceeded``. Required. Known values are: + \"internal_error\", \"file_not_found\", \"parsing_error\", and \"unhandled_mime_type\".""" + message: str = rest_field() + """A human-readable description of the error. Required.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.VectorStoreFileErrorCode"], + message: str, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): + """Options to configure a vector store static chunking strategy. + + + :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is + 800. The minimum value is 100 and the maximum value is 4096. Required. + :vartype max_chunk_size_tokens: int + :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value + is 400. + Note that the overlap must not exceed half of max_chunk_size_tokens. Required. + :vartype chunk_overlap_tokens: int + """ + + max_chunk_size_tokens: int = rest_field() + """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 + and the maximum value is 4096. Required.""" + chunk_overlap_tokens: int = rest_field() + """The number of tokens that overlap between chunks. The default value is 400. + Note that the overlap must not exceed half of max_chunk_size_tokens. Required.""" + + @overload + def __init__( + self, + *, + max_chunk_size_tokens: int, + chunk_overlap_tokens: int, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"): + """A statically configured chunking strategy. + + All required parameters must be populated in order to send to server. + + :ivar type: The object type, which is always 'static'. Required.
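# Editorial sketch (not part of the patch): a static chunking strategy request
# that respects the documented limit (overlap at most half of
# max_chunk_size_tokens), assuming these models are exported from
# azure.ai.client.models.
from azure.ai.client.models import (
    VectorStoreStaticChunkingStrategyOptions,
    VectorStoreStaticChunkingStrategyRequest,
)

strategy = VectorStoreStaticChunkingStrategyRequest(
    static=VectorStoreStaticChunkingStrategyOptions(
        max_chunk_size_tokens=800,
        chunk_overlap_tokens=400,  # 400 <= 800 / 2, so this satisfies the constraint
    )
)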
+ :vartype type: str or ~azure.ai.client.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.client.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() + """The options for the static chunking strategy. Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs) + + +class VectorStoreStaticChunkingStrategyResponse( + VectorStoreChunkingStrategyResponse, discriminator="static" +): # pylint: disable=name-too-long + """A statically configured chunking strategy. + + + :ivar type: The object type, which is always 'static'. Required. + :vartype type: str or ~azure.ai.client.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.client.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() + """The options for the static chunking strategy. Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 47d82daf89b3..78a4226595c5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -11,6 +11,25 @@ from azure.core.credentials import TokenCredential from ._models import ConnectionsListSecretsResponse +from ._enums import AgentStreamEvent +from ._models import MessageDeltaChunk, ThreadRun, RunStep, ThreadMessage, RunStepDeltaChunk +from ._models import ( + FunctionToolDefinition, + FunctionDefinition, + ToolDefinition, + ToolResources, + FileSearchToolDefinition, + FileSearchToolResource, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + RequiredFunctionToolCall, +) + +from abc import ABC, abstractmethod +from typing import AsyncIterator, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin + +import inspect, json, logging + class EndpointProperties: @@ -45,7 +64,697 @@ def __str__(self): return out -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +# Define type_map to translate Python type annotations to JSON Schema types +type_map = { + "str": "string", + "int": "integer", + "float": "number", + "bool": "boolean", + "bytes": "string", # Typically encoded as base64-encoded strings in JSON + "NoneType": "null", + "datetime": "string", # Use format "date-time" + "date": "string", # Use format "date" + "UUID": "string", # Use format "uuid" +} + + +def _map_type(annotation) -> str: + + if annotation == inspect.Parameter.empty: + return "string" # Default type if annotation is missing + + origin = get_origin(annotation) + + if origin in {list, List}: + return "array" + elif origin in {dict, Dict}: + return "object" + elif hasattr(annotation, "__name__"): + return type_map.get(annotation.__name__, "string") + elif isinstance(annotation, type): + return type_map.get(annotation.__name__, "string") + + return "string" # Fallback to "string" if type is unrecognized + + +class Tool(ABC): + """ + An abstract class representing a tool that can be used by an agent. + """ + + @property + @abstractmethod + def definitions(self) -> List[ToolDefinition]: + """Get the tool definitions.""" + pass + + @property + @abstractmethod + def resources(self) -> ToolResources: + """Get the tool resources.""" + pass + + @abstractmethod + def execute(self, tool_call: Any) -> Any: + """ + Execute the tool with the provided tool call. + + :param tool_call: The tool call to execute. + :return: The output of the tool operations. + """ + pass + + +class FunctionTool(Tool): + """ + A tool that executes user-defined functions. + """ + + def __init__(self, functions: Dict[str, Any]): + """ + Initialize FunctionTool with a dictionary of functions. + + :param functions: A dictionary where keys are function names and values are the function objects. 
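# Editorial sketch (not part of the patch): what FunctionTool derives from a
# user function. The first docstring line becomes the tool description, and
# _map_type translates the annotations (str -> "string", int -> "integer",
# list/dict -> "array"/"object", unannotated -> "string").
def fetch_weather(city: str, days: int) -> str:
    """Fetch a short weather forecast for a city."""
    return f"Sunny in {city} for the next {days} day(s)"

weather_tool = FunctionTool({"fetch_weather": fetch_weather})
# weather_tool.definitions now holds one FunctionToolDefinition whose parameters
# describe "city" as type "string" and "days" as type "integer", both required.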
+ """ + self._functions = functions + self._definitions = self._build_function_definitions(functions) + + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: + specs = [] + for name, func in functions.items(): + sig = inspect.signature(func) + params = sig.parameters + docstring = inspect.getdoc(func) + description = docstring.split("\n")[0] if docstring else "No description" + + properties = {} + for param_name, param in params.items(): + param_type = _map_type(param.annotation) + param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None + properties[param_name] = {"type": param_type, "description": param_description} + + function_def = FunctionDefinition( + name=name, + description=description, + parameters={"type": "object", "properties": properties, "required": list(params.keys())}, + ) + tool_def = FunctionToolDefinition(function=function_def) + specs.append(tool_def) + return specs + + def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: + function_name = tool_call.function.name + arguments = tool_call.function.arguments + + if function_name not in self._functions: + logging.error(f"Function '{function_name}' not found.") + raise ValueError(f"Function '{function_name}' not found.") + + function = self._functions[function_name] + + try: + parsed_arguments = json.loads(arguments) + except json.JSONDecodeError as e: + logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") + raise ValueError(f"Invalid JSON arguments: {e}") from e + + if not isinstance(parsed_arguments, dict): + logging.error(f"Arguments must be a JSON object for function '{function_name}'.") + raise TypeError("Arguments must be a JSON object.") + + return function, parsed_arguments + + def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + function, parsed_arguments = self._get_func_and_args(tool_call) + + try: + return function(**parsed_arguments) if parsed_arguments else function() + except TypeError as e: + logging.error(f"Error executing function '{tool_call.function.name}': {e}") + raise + + @property + def definitions(self) -> List[FunctionToolDefinition]: + """ + Get the function definitions. + + :return: A list of function definitions. + """ + return self._definitions + + @property + def resources(self) -> ToolResources: + """ + Get the tool resources for the agent. + + :return: An empty ToolResources as FunctionTool doesn't have specific resources. + """ + return ToolResources() + + +class AsyncFunctionTool(FunctionTool): + + async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + function, parsed_arguments = self._get_func_and_args(tool_call) + + try: + if inspect.iscoroutinefunction(function): + return await function(**parsed_arguments) if parsed_arguments else await function() + else: + return function(**parsed_arguments) if parsed_arguments else function() + except TypeError as e: + logging.error(f"Error executing function '{tool_call.function.name}': {e}") + raise + + +class FileSearchTool(Tool): + """ + A tool that searches for uploaded file information from the created vector stores. + """ + + def __init__(self): + self.vector_store_ids = [] + + def add_vector_store(self, store_id: str): + """ + Add a vector store ID to the list of vector stores to search for files. 
+ """ + # TODO + self.vector_store_ids.append(store_id) + + @property + def definitions(self) -> List[FileSearchToolDefinition]: + """ + Get the file search tool definitions. + """ + return [FileSearchToolDefinition()] + + @property + def resources(self) -> ToolResources: + """ + Get the file search resources. + """ + return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids)) + + def execute(self, tool_call: Any) -> Any: + pass + + +class CodeInterpreterTool(Tool): + """ + A tool that interprets code files uploaded to the agent. + """ + + def __init__(self): + self.file_ids = [] + + def add_file(self, file_id: str): + """ + Add a file ID to the list of files to interpret. + + :param file_id: The ID of the file to interpret. + """ + self.file_ids.append(file_id) + + @property + def definitions(self) -> List[CodeInterpreterToolDefinition]: + """ + Get the code interpreter tool definitions. + """ + return [CodeInterpreterToolDefinition()] + + @property + def resources(self) -> ToolResources: + """ + Get the code interpreter resources. + """ + return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids)) + + def execute(self, tool_call: Any) -> Any: + pass + + +class ToolSet: + """ + A collection of tools that can be used by an agent. + """ + + def __init__(self): + self._tools = [] + + def validate_tool_type(self, tool_type: Type[Tool]) -> None: + """ + Validate the type of the tool. + + :param tool_type: The type of the tool to validate. + :raises ValueError: If the tool type is not a subclass of Tool. + """ + if isinstance(tool_type, AsyncFunctionTool): + raise ValueError( + "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.client.aio." + ) + + def add(self, tool: Tool): + """ + Add a tool to the tool set. + + :param tool: The tool to add. + :raises ValueError: If a tool of the same type already exists. + """ + self.validate_tool_type(type(tool)) + + if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): + raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") + self._tools.append(tool) + + def remove(self, tool_type: Type[Tool]) -> None: + """ + Remove a tool of the specified type from the tool set. + + :param tool_type: The type of tool to remove. + :raises ValueError: If a tool of the specified type is not found. + """ + for i, tool in enumerate(self._tools): + if isinstance(tool, tool_type): + del self._tools[i] + logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.") + return + raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the definitions for all tools in the tool set. + """ + tools = [] + for tool in self._tools: + tools.extend(tool.definitions) + return tools + + @property + def resources(self) -> Dict[str, Any]: + """ + Get the resources for all tools in the tool set. + """ + tool_resources = {} + for tool in self._tools: + resources = tool.resources + for key, value in resources.items(): + if key in tool_resources: + if isinstance(tool_resources[key], dict) and isinstance(value, dict): + tool_resources[key].update(value) + else: + tool_resources[key] = value + return tool_resources + + def get_definitions_and_resources(self) -> Dict[str, Any]: + """ + Get the definitions and resources for all tools in the tool set. 
+ + :return: A dictionary containing the tool resources and definitions. + """ + return { + "tool_resources": self.resources, + "tools": self.definitions, + } + + def get_tool(self, tool_type: Type[Tool]) -> Tool: + """ + Get a tool of the specified type from the tool set. + + :param tool_type: The type of tool to get. + :return: The tool of the specified type. + :raises ValueError: If a tool of the specified type is not found. + """ + for tool in self._tools: + if isinstance(tool, tool_type): + return tool + raise ValueError(f"Tool of type {tool_type.__name__} not found.") + + def execute_tool_calls(self, tool_calls: List[Any]) -> Any: + """ + Execute the provided tool calls using the matching tools in the tool set. + + :param tool_calls: A list of tool calls to execute. + :return: The output of the tool operations. + """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(FunctionTool) + output = tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: + logging.error(f"Failed to execute tool call {tool_call}: {e}") + + return tool_outputs + + +class AsyncToolSet(ToolSet): + + def validate_tool_type(self, tool_type: Type[Tool]) -> None: + """ + Validate the type of the tool. + + :param tool_type: The type of the tool to validate. + :raises ValueError: If the tool type is the synchronous FunctionTool, which is not supported by AsyncToolSet. + """ + if issubclass(tool_type, FunctionTool) and not issubclass(tool_type, AsyncFunctionTool): + raise ValueError( + "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." + ) + + async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: + """ + Execute the provided tool calls using the matching tools in the tool set. + + :param tool_calls: A list of tool calls to execute. + :return: The output of the tool operations.
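# Editorial sketch (not part of the patch): assembling a synchronous ToolSet and
# resolving a run's required tool calls. fetch_weather is the sample function
# from the earlier sketch; the required_action traversal in the comment is a
# hypothetical illustration of where tool_calls would come from.
toolset = ToolSet()
toolset.add(FunctionTool({"fetch_weather": fetch_weather}))
toolset.add(CodeInterpreterTool())

payload = toolset.get_definitions_and_resources()
# payload["tools"] and payload["tool_resources"] can be passed when creating an agent.

# When a run stops with status "requires_action" (hypothetical traversal):
# outputs = toolset.execute_tool_calls(run.required_action.submit_tool_outputs.tool_calls)
# Each entry is {"tool_call_id": ..., "output": ...}, ready to submit back to the run.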
+ """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(AsyncFunctionTool) + output = await tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: + logging.error(f"Failed to execute tool call {tool_call}: {e}") + + return tool_outputs + + +class AgentEventHandler: + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class AsyncAgentEventHandler: + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + async def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + async def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + async def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + async def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + async def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class BaseAgentRunStream: + def __enter__(self): + return self + + def process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type = None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = ThreadRun(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type in { + 
AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = RunStep(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = ThreadMessage(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = MessageDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = RunStepDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + else: + event_data_obj = parsed_data + + return event_type, event_data_obj + + +class AsyncAgentRunStream(BaseAgentRunStream, AsyncIterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + event_handler: Optional[AsyncAgentEventHandler] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + await close_method() + + def __aiter__(self): + return self + + async def __anext__(self) -> Tuple[str, Any]: + while True: + try: + chunk = await self.response_iterator.__anext__() + self.buffer += chunk.decode("utf-8") + except StopAsyncIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return await self.process_event(event_data_str) + raise StopAsyncIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return await self.process_event(event_data_str) + + async def process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = super().process_event(event_data_str) + + if self.event_handler: + try: + if isinstance(event_data_obj, MessageDeltaChunk): + await self.event_handler.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + await self.event_handler.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + await self.event_handler.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + await self.event_handler.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + await self.event_handler.on_run_step_delta(event_data_obj) + elif event_type == AgentStreamEvent.ERROR: + await self.event_handler.on_error(event_data_obj) + elif event_type == AgentStreamEvent.DONE: + await self.event_handler.on_done() + self.done = True # Mark the stream as done + else: + await self.event_handler.on_unhandled_event(event_type, event_data_obj) + except Exception as e: + logging.error(f"Error in event handler for event '{event_type}': {e}") + + return event_type, event_data_obj + + async def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. 
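# Editorial sketch (not part of the patch): a minimal AsyncAgentEventHandler
# subclass and the until_done() drain pattern. A real stream would be returned
# by a streaming agents operation; constructing one directly from a raw
# response_iterator here is only for illustration.
class PrintingHandler(AsyncAgentEventHandler):
    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
        print("message delta:", delta)

    async def on_done(self) -> None:
        print("stream complete")

# async with AsyncAgentRunStream(response_iterator, PrintingHandler()) as stream:
#     await stream.until_done()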
+ """ + try: + async for _ in self: + pass # The EventHandler handles the events + except StopAsyncIteration: + pass + + +class AgentRunStream(BaseAgentRunStream, Iterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: Iterator[bytes], + event_handler: Optional[AgentEventHandler] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + + def __exit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + close_method() + + def __iter__(self): + return self + + def __next__(self) -> Tuple[str, Any]: + if self.done: + raise StopIteration + while True: + try: + chunk = next(self.response_iterator) + self.buffer += chunk.decode("utf-8") + except StopIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return self.process_event(event_data_str) + raise StopIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return self.process_event(event_data_str) + + def process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = super().process_event(event_data_str) + + if self.event_handler: + try: + if isinstance(event_data_obj, MessageDeltaChunk): + self.event_handler.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + self.event_handler.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + self.event_handler.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + self.event_handler.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + self.event_handler.on_run_step_delta(event_data_obj) + elif event_type == AgentStreamEvent.ERROR: + self.event_handler.on_error(event_data_obj) + elif event_type == AgentStreamEvent.DONE: + self.event_handler.on_done() + self.done = True # Mark the stream as done + else: + self.event_handler.on_unhandled_event(event_type, event_data_obj) + except Exception as e: + logging.error(f"Error in event handler for event '{event_type}': {e}") + + return event_type, event_data_obj + + def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + """ + try: + for _ in self: + pass # The EventHandler handles the events + except StopIteration: + pass + + +__all__: List[str] = [ + "AsyncAgentEventHandler", + "AgentEventHandler", + "AsyncAgentRunStream", + "AgentRunStream", + "AsyncFunctionTool", + "AsyncToolSet", + "FunctionTool", + "FileSearchTool", + "CodeInterpreterTool", + "Tool", + "ToolSet", +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index 4f2383d40c3e..a9e40b7a014c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -6,8 +6,8 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from ._operations import EndpointsOperations from ._operations import AgentsOperations +from ._operations import EndpointsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -15,8 +15,8 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "EndpointsOperations", "AgentsOperations", + "EndpointsOperations", "EvaluationsOperations", "InferenceOperations", ] diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 8139efd97aac..235a0449290d 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Type, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, Type, TypeVar, Union, overload import urllib.parse from azure.core.exceptions import ( @@ -28,14 +28,19 @@ from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict -from .. import models as _models +from .. import _model_base, models as _models from .._model_base import SdkJSONEncoder, _deserialize from .._serialization import Serializer +from .._vendor import FileType, prepare_multipart_form_data if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() T = TypeVar("T") @@ -45,18 +50,55 @@ _SERIALIZER.client_side_validation = False -def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: +def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/connections" + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_agents_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" # Construct parameters _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -64,7 +106,274 @@ def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: +def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", 
accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_messages_request( + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if run_id is not None: + _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": 
_SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -72,123 +381,5780 @@ def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") - # Construct URL - _url = "/connections/{connectionNameInUrl}/listsecrets" - path_format_arguments = { - "connectionNameInUrl": _SERIALIZER.url("connection_name_in_url", connection_name_in_url, "str"), - } + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_run_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_runs_request( + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": 
_SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long + thread_id: str, run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" + path_format_arguments = { + "threadId": 
_SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/cancel" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_step_request(thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + "stepId": _SERIALIZER.url("step_id", step_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_agents_list_run_steps_request( + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_files_request( + *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if purpose is not None: + _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_upload_file_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = 
_url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_content_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}/content" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_stores_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_modify_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_files_request( # pylint: disable=name-too-long + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": 
_SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + 
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionNameInUrl}/listsecrets" + path_format_arguments = { + "connectionNameInUrl": _SERIALIZER.url("connection_name_in_url", connection_name_in_url, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/create" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", 
_headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.client.AzureAIClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". 
Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_agents_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: Agent. 
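+
+ # Usage sketch (illustrative only): cursor-based paging with the ``limit``,
+ # ``order``, ``after`` and ``before`` parameters documented for list_agents
+ # above. ``client`` is the hypothetical AzureAIClient from the create_agent
+ # sketch; the ``data``/``has_more``/``last_id`` field names follow the OpenAI
+ # pageable-list shape and are assumptions here.
+ #
+ #   page = client.agents.list_agents(limit=20, order="desc")
+ #   while True:
+ #       for agent in page.data:
+ #           print(agent.id, agent.name)
+ #       if not page.has_more:
+ #           break
+ #       page = client.agents.list_agents(limit=20, order="desc", after=page.last_id)
+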
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
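+
+ # Usage sketch (illustrative only): fetching a single agent by ID with
+ # get_agent, as documented above; ``client`` and ``agent`` come from the
+ # earlier create_agent sketch.
+ #
+ #   fetched = client.agents.get_agent(assistant_id=agent.id)
+ #   print(fetched.name, fetched.instructions)
+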
+ Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.Agent
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update_agent(
+ self,
+ assistant_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ model: Optional[str] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ instructions: Optional[str] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ tool_resources: Optional[_models.ToolResources] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.Agent:
+ """Modifies an existing agent.
+
+ :param assistant_id: The ID of the agent to modify. Required.
+ :type assistant_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword model: The ID of the model to use. Default value is None.
+ :paramtype model: str
+ :keyword name: The modified name for the agent to use. Default value is None.
+ :paramtype name: str
+ :keyword description: The modified description for the agent to use. Default value is None.
+ :paramtype description: str
+ :keyword instructions: The modified system instructions for the agent to use. Default value
+ is None.
+ :paramtype instructions: str
+ :keyword tools: The modified collection of tools to enable for the agent. Default value is
+ None.
+ :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
+ :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
+ are specific to the type of tool. For example,
+ the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
+ requires a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.client.models.ToolResources
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output more random,
+ while lower values like 0.2 will make it more focused and deterministic. Default value is
+ None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass.
+ So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword response_format: The response format of the tool calls used by this agent. Is one of
+ the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+ or ~azure.ai.client.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: Agent.
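+
+ # Usage sketch (illustrative only): a partial update via the keyword overload
+ # of update_agent; only the fields passed are changed. ``client`` and ``agent``
+ # come from the earlier sketches.
+ #
+ #   updated = client.agents.update_agent(
+ #       assistant_id=agent.id,
+ #       instructions="You are a terse, helpful agent.",
+ #       metadata={"team": "docs"},  # up to 16 key/value pairs, per the docstring
+ #   )
+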
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
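+
+ # Usage sketch (illustrative only): deleting an agent. The ``deleted`` field
+ # on AgentDeletionStatus is an assumption based on the OpenAI deletion-status
+ # shape; ``client`` and ``agent`` come from the earlier sketches.
+ #
+ #   status = client.agents.delete_agent(assistant_id=agent.id)
+ #   print(status.deleted)
+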
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. 
+ :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
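+
+ # Usage sketch (illustrative only): creating a thread seeded with one user
+ # message. Passing the message as a plain dict assumes the JSON-compatible
+ # model types described in these docstrings; ``client`` comes from the earlier
+ # create_agent sketch.
+ #
+ #   thread = client.agents.create_thread(
+ #       messages=[{"role": "user", "content": "Hello!"}],
+ #       metadata={"purpose": "demo"},
+ #   )
+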
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AgentThread. 
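+
+ # Usage sketch (illustrative only): re-fetching the thread created above by ID
+ # with get_thread.
+ #
+ #   same_thread = client.agents.get_thread(thread_id=thread.id)
+ #   print(same_thread.id == thread.id)
+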
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + _request = build_agents_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
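+
+ # Usage sketch (illustrative only): updating only the metadata of an existing
+ # thread via the keyword overload of update_thread; ``client`` and ``thread``
+ # come from the earlier sketches.
+ #
+ #   thread = client.agents.update_thread(
+ #       thread_id=thread.id,
+ #       metadata={"reviewed": "true"},
+ #   )
+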
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.client.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
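+
+ # Usage sketch (illustrative only): deleting the thread. As with agent
+ # deletion, the ``deleted`` field name on ThreadDeletionStatus is an
+ # assumption.
+ #
+ #   result = client.agents.delete_thread(thread_id=thread.id)
+ #   print(result.deleted)
+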
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. 
+ * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.client.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. + :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_message( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + role: Union[str, _models.MessageRole] = _Unset, + content: str = _Unset, + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.client.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. 
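+
+ # Usage sketch (illustrative only): adding a user message to a thread with the
+ # required ``role`` and ``content`` keywords documented above; ``client`` and
+ # ``thread`` come from the earlier sketches.
+ #
+ #   message = client.agents.create_message(
+ #       thread_id=thread.id,
+ #       role="user",
+ #       content="What is the weather like today?",
+ #   )
+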
+ :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> 
_models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
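+
+ # Usage sketch (illustrative only): listing a thread's messages oldest-first
+ # with list_messages, then fetching one by ID with get_message. The ``data``
+ # field name follows the OpenAI pageable-list shape and is an assumption here.
+ #
+ #   messages = client.agents.list_messages(thread_id=thread.id, order="asc")
+ #   for msg in messages.data:
+ #       print(msg.id, msg.role)
+ #   one = client.agents.get_message(thread_id=thread.id, message_id=message.id)
+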
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
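+
+ # Usage sketch (illustrative only): per the overloads above, only ``metadata``
+ # is mutable through update_message; ``client``, ``thread`` and ``message``
+ # come from the earlier sketches.
+ #
+ #   message = client.agents.update_message(
+ #       thread_id=thread.id,
+ #       message_id=message.id,
+ #       metadata={"handled": "true"},
+ #   )
+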
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool is + called. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool is + called. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun.
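+ A hypothetical call sketch (editor's illustration; the ``client.agents`` accessor and the ``agent``/``thread`` objects are assumptions, not part of the generated code): + + .. code-block:: python + + # Start a run on an existing thread with a previously created agent. + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id, temperature=0.2) + # The service typically processes the run asynchronously; callers can poll get_run(thread_id, run.id) until a terminal status.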
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. 
Required. + :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run 
from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
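+ A short usage sketch (illustrative only; ``client.agents`` and the existing ``thread``/``run`` objects are assumed): + + .. code-block:: python + + # Tag a run with custom metadata without touching any other run state. + run = client.agents.update_run(thread_id=thread.id, run_id=run.id, metadata={"stage": "triage"})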
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
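+ A hedged sketch of the typical flow (editor's illustration; the ``client.agents`` accessor, the placeholder tool-call ID, and the ``ToolOutput`` field names are assumptions based on the signatures above): + + .. code-block:: python + + from azure.ai.client.models import ToolOutput + + # When get_run(...) reports status 'requires_action', answer each requested tool call. + outputs = [ToolOutput(tool_call_id="call_abc123", output="42")] # hypothetical ID and value + run = client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=outputs)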
+ :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
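+ A minimal sketch (illustrative; assumes ``client.agents`` and an in-progress ``run``): + + .. code-block:: python + + # Request cancellation; the returned run reflects the service-side status at that moment. + run = client.agents.cancel_run(thread_id=thread.id, run_id=run.id)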
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool is + called. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool is + called. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun.
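+ A hypothetical one-shot sketch (editor's illustration; ``client.agents`` and ``agent`` are assumptions): + + .. code-block:: python + + # Create a fresh (empty) thread and start a run on it in a single call. + run = client.agents.create_thread_and_run(assistant_id=agent.id, instructions="Answer briefly.")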
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :return: RunStep. 
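+ A brief sketch (illustrative; step IDs would normally come from ``list_run_steps``, and ``client.agents`` is an assumed accessor): + + .. code-block:: python + + # Inspect one step of a run, e.g. a tool invocation or a message creation. + step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id="step_abc123") # hypothetical ID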
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.client.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_agents_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :return: FileListResponse. 
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.client._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """

+    @distributed_trace
+    def upload_file(
+        self,
+        body: JSON = _Unset,
+        *,
+        file: FileType = _Unset,
+        purpose: Union[str, _models.FilePurpose] = _Unset,
+        filename: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param body: Is one of the following types: JSON. Required.
+        :type body: JSON
+        :keyword file: The file data, in bytes. Required.
+        :paramtype file: ~azure.ai.client._vendor.FileType
+        :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and
+         Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and
+         ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results",
+         "assistants", "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
+        :keyword filename: The name of the file. Default value is None.
+        :paramtype filename: str
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {  # pylint: disable=unsubscriptable-object
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if file is _Unset:
+                raise TypeError("missing required argument: file")
+            if purpose is _Unset:
+                raise TypeError("missing required argument: purpose")
+            body = {"file": file, "filename": filename, "purpose": purpose}
+            body = {k: v for k, v in body.items() if v is not None}
+        # Split the body into multipart file fields and plain form-data fields before building the request.
+        _body = body.as_dict() if isinstance(body, _model_base.Model) else body
+        _file_fields: List[str] = ["file"]
+        _data_fields: List[str] = ["purpose", "filename"]
+        _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields)
+
+        _request = build_agents_upload_file_request(
+            api_version=self._config.api_version,
+            files=_files,
+            data=_data,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
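+        # On success the body is deserialized below: as a raw byte iterator when the caller
+        # requested a streamed response, otherwise as a typed OpenAIFile parsed from JSON.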
+ if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {  # pylint: disable=unsubscriptable-object
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+        _request = build_agents_get_file_request(
+            file_id=file_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.OpenAIFile, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace
+    def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
+        """Retrieves the raw content of a previously uploaded file.
+
+        :param file_id: The ID of the file to retrieve. Required.
+        :type file_id: str
+        :return: FileContentResponse.
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.client.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. 
+ :paramtype before: str + :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. 
Useful for tools like
+         ``file_search`` that can access files. Default value is None.
+        :paramtype file_ids: list[str]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store(
+        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Creates a vector store.
+
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store(
+        self,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        file_ids: Optional[List[str]] = None,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Creates a vector store.
+
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+         ``file_search`` that can access files. Default value is None.
+        :paramtype file_ids: list[str]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping[int, Type[HttpResponseError]] = {  # pylint: disable=unsubscriptable-object
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+        _request = build_agents_get_vector_store_request(
+            vector_store_id=vector_store_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.VectorStore, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    def modify_vector_store(
+        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def modify_vector_store(
+        self,
+        vector_store_id: str,
+        *,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def modify_vector_store(
+        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def modify_vector_store(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file(
+        self,
+        vector_store_id: str,
+        *,
+        file_id: str,
+        content_type: str = "application/json",
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        **kwargs: Any
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :keyword file_id: Identifier of the file. Required.
+        :paramtype file_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file(
+        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store_file(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        file_id: str = _Unset,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        **kwargs: Any
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword file_id: Identifier of the file. Required.
+        :paramtype file_id: str
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
+        :return: VectorStoreFile.
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
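+
+        A minimal usage sketch (illustrative only; assumes an ``AzureAIClient`` named ``client`` and
+        hypothetical vector store and file IDs):
+
+        .. code-block:: python
+
+            batch = client.agents.create_vector_store_file_batch(
+                vector_store_id="vs_123",  # hypothetical vector store ID
+                file_ids=["file_abc", "file_def"],  # hypothetical file IDs
+            )
+            print(batch.id, batch.status)
+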
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - _url: str = _url.format(**path_format_arguments) # type: ignore + response = pipeline_response.http_response - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + return deserialized # type: ignore -def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + @distributed_trace + def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: 
Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - # Construct URL - _url = "/evaluations/create" + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + response = pipeline_response.http_response + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) -def build_evaluations_list_request( - *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") + if cls: + return cls(pipeline_response, deserialized, {}) # type: 
ignore - # Construct URL - _url = "/evaluations" + return deserialized # type: ignore - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not None: - _params["skip"] = _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + @distributed_trace + def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) -def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + response = pipeline_response.http_response - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - # Construct URL - _url = "/evaluations/{id}" - 
path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - _url: str = _url.format(**path_format_arguments) # type: ignore + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + return deserialized # type: ignore - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + @distributed_trace + def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.client.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. 
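+
+        A minimal usage sketch (illustrative only; assumes an ``AzureAIClient`` named ``client``,
+        hypothetical IDs, and the standard pageable-list fields):
+
+        .. code-block:: python
+
+            page = client.agents.list_vector_store_file_batch_files(
+                vector_store_id="vs_123",  # hypothetical vector store ID
+                batch_id="vsfb_456",  # hypothetical batch ID
+                limit=20,
+            )
+            for vs_file in page.data:
+                print(vs_file.id, vs_file.status)
+            if page.has_more:
+                # fetch the next page by passing the last seen ID as ``after``
+                page = client.agents.list_vector_store_file_batch_files(
+                    vector_store_id="vs_123", batch_id="vsfb_456", limit=20, after=page.last_id
+                )
+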
The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) -def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) - # Construct URL - _url = "/evaluations/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } + response = pipeline_response.http_response - _url: str = _url.format(**path_format_arguments) # type: ignore + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return deserialized # type: ignore class EndpointsOperations: @@ -420,24 +6386,6 @@ def _list_secrets( # pylint: disable=protected-access return deserialized # type: ignore -class AgentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
- - Instead, you should access the following operations through - :class:`~azure.ai.client.AzureAIClient`'s - :attr:`agents` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - class EvaluationsOperations: """ .. warning:: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 003401135bd9..3d994f738397 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -6,28 +6,46 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import logging - -from typing import List, Iterable +import sys, io, logging, os, time +from io import IOBase +from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING # from zoneinfo import ZoneInfo from ._operations import EndpointsOperations as EndpointsOperationsGenerated +from ._operations import AgentsOperations as AgentsOperationsGenerated from ..models._enums import AuthenticationType, EndpointType from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from .._types import AgentsApiResponseFormatOption from ..models._patch import EndpointProperties +from ..models._enums import FilePurpose +from .._vendor import FileType +from .. import models as _models + +from azure.core.tracing.decorator import distributed_trace + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + import _types + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() logger = logging.getLogger(__name__) -class InferenceOperations(): + +class InferenceOperations: def __init__(self, outer_instance): self.outer_instance = outer_instance - - def get_chat_completions_client(self) -> "ChatComletionsClient": + def get_chat_completions_client(self) -> "ChatCompletionsClient": endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, - populate_secrets=True + endpoint_type=EndpointType.SERVERLESS, populate_secrets=True ) if not endpoint: raise ValueError("No serverless endpoint found") @@ -35,29 +53,31 @@ def get_chat_completions_client(self) -> "ChatComletionsClient": try: from azure.ai.inference import ChatCompletionsClient except ModuleNotFoundError as _: - raise ModuleNotFoundError("Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'") + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" + ) if endpoint.authentication_type == AuthenticationType.API_KEY: - logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication") - from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, - credential=AzureKeyCredential(endpoint.key) + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" ) + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth - logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication") + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" + ) client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.properties.token_credential + endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential ) elif endpoint.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. - logger.debug("[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication") - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.token_credential + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" ) + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) else: raise ValueError("Unknown authentication type") @@ -65,8 +85,7 @@ def get_chat_completions_client(self) -> "ChatComletionsClient": def get_embeddings_client(self) -> "EmbeddingsClient": endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, - populate_secrets=True + endpoint_type=EndpointType.SERVERLESS, populate_secrets=True ) if not endpoint: raise ValueError("No serverless endpoint found") @@ -74,29 +93,29 @@ def get_embeddings_client(self) -> "EmbeddingsClient": try: from azure.ai.inference import EmbeddingsClient except ModuleNotFoundError as _: - raise ModuleNotFoundError("Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'") + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" + ) if endpoint.authentication_type == AuthenticationType.API_KEY: - logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication") - from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient( - endpoint=endpoint.endpoint_url, - credential=AzureKeyCredential(endpoint.key) + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" ) + from azure.core.credentials import AzureKeyCredential + + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth - logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication") - client = EmbeddingsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.properties.token_credential + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) elif endpoint.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. - logger.debug("[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication") - client = EmbeddingsClient( - endpoint=endpoint.endpoint_url, - credential=endpoint.token_credential + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) else: raise ValueError("Unknown authentication type") @@ -104,8 +123,7 @@ def get_embeddings_client(self) -> "EmbeddingsClient": def get_azure_openai_client(self) -> "AzureOpenAI": endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, - populate_secrets=True + endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True ) if not endpoint: raise ValueError("No Azure OpenAI endpoint found") @@ -116,28 +134,38 @@ def get_azure_openai_client(self) -> "AzureOpenAI": raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") if endpoint.authentication_type == AuthenticationType.API_KEY: - logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication") + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" + ) client = AzureOpenAI( api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? + api_version="2024-08-01-preview", # TODO: Is this needed? ) elif endpoint.authentication_type == AuthenticationType.AAD: - logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication") + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" + ) try: from azure.identity import get_bearer_token_provider except ModuleNotFoundError as _: - raise ModuleNotFoundError("azure.identity package not installed. Please install it using 'pip install azure.identity'") + raise ModuleNotFoundError( + "azure.identity package not installed. 
Please install it using 'pip install azure.identity'" + ) client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) elif endpoint.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider(endpoint.token_credential, "https://cognitiveservices.azure.com/.default"), + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), azure_endpoint=endpoint.endpoint_url, api_version="2024-08-01-preview", ) @@ -213,9 +241,896 @@ def list( return endpoint_properties_list +class AgentsOperations(AgentsOperationsGenerated): + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with toolset. + + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: A description for the new agent. Default value is None. + :paramtype description: str + :keyword instructions: System instructions for the agent. Default value is None. + :paramtype instructions: str + :keyword toolset: Collection of tools (alternative to `tools` and `tool_resources`). Default + value is None. + :paramtype toolset: ~azure.ai.client.models.ToolSet + :keyword temperature: Sampling temperature for generating agent responses. Default value + is None. + :paramtype temperature: float + :keyword top_p: Nucleus sampling parameter. Default value is None. + :paramtype top_p: float + :keyword response_format: Response format for tool calls. Default value is None. + :paramtype response_format: ~azure.ai.client.models.AgentsApiResponseFormatOption + :keyword metadata: Key/value pairs for storing additional information. Default value is None. 
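+
+        A minimal usage sketch (illustrative only; assumes an ``AzureAIClient`` named ``client``, a
+        hypothetical model deployment name, and a ``ToolSet`` constructed elsewhere):
+
+        .. code-block:: python
+
+            agent = client.agents.create_agent(
+                model="gpt-4o",  # hypothetical model deployment name
+                name="my-agent",
+                instructions="You are a helpful agent.",
+                toolset=toolset,  # a previously built ToolSet carrying definitions and resources
+            )
+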
+ :paramtype metadata: dict[str, str] + :return: An Agent object. + :rtype: ~azure.ai.client.models.Agent + :raises: ~azure.core.exceptions.HttpResponseError + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. + :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools (alternative to `tools` and `tool_resources`). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. + :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, IOBase): + return super().create_agent(body=body, content_type=content_type, **kwargs) + return super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.ToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. + :rtype: ~azure.ai.client.models.ToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
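+
+        A minimal usage sketch of this overload (illustrative only; assumes an ``AzureAIClient`` named
+        ``client`` and hypothetical thread and agent IDs):
+
+        .. code-block:: python
+
+            run = client.agents.create_run(
+                "thread_123",  # hypothetical thread ID
+                {"assistant_id": "asst_456"},  # request body as a JSON-compatible dict
+            )
+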
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. 
The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.client.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat Default value is None.
+        :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+         or ~azure.ai.client.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_run(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> Union[_models.ThreadRun, _models.AgentRunStream]:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun.
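+
+        A minimal usage sketch of this overload (illustrative only; assumes a hypothetical thread ID
+        and a local file holding a JSON request payload):
+
+        .. code-block:: python
+
+            with open("run_request.json", "rb") as body:
+                run = client.agents.create_run("thread_123", body)
+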
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.client.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat Default value is None.
+        :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+         or ~azure.ai.client.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if isinstance(body, dict):  # Handle overload with JSON body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
+            response = super().create_run(
+                thread_id,
+                assistant_id=assistant_id,
+                model=model,
+                instructions=instructions,
+                additional_instructions=additional_instructions,
+                additional_messages=additional_messages,
+                tools=tools,
+                stream_parameter=stream,  # maps to the "stream" field in the request body
+                stream=stream,  # also controls streaming of the HTTP response itself
+                temperature=temperature,
+                top_p=top_p,
+                max_prompt_tokens=max_prompt_tokens,
+                max_completion_tokens=max_completion_tokens,
+                truncation_strategy=truncation_strategy,
+                tool_choice=tool_choice,
+                response_format=response_format,
+                metadata=metadata,
+                **kwargs,
+            )
+
+        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + if stream: + return _models.AgentRunStream(response, event_handler) + else: + return response + + @distributed_trace + def create_and_process_run( + self, + thread_id: str, + assistant_id: str, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AgentEventHandler] = None, + sleep_interval: int = 1, + **kwargs: Any, + ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + """Creates a new run for an agent thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. 
The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or str or
+         ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.client.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat Default value is None.
+        :paramtype response_format: str or str or
+         ~azure.ai.client.models.AgentsApiResponseFormatMode or
+         ~azure.ai.client.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
+         Default value is 1.
+        :paramtype sleep_interval: int
+        :return: ThreadRun or AgentRunStream. The final ThreadRun once polling completes when streaming
+         is disabled, otherwise the AgentRunStream object.
+        :rtype: ~azure.ai.client.models.ThreadRun or ~azure.ai.client.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
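+
+        A minimal usage sketch (illustrative only; assumes an ``AzureAIClient`` named ``client`` and
+        hypothetical thread and agent IDs):
+
+        .. code-block:: python
+
+            run = client.agents.create_and_process_run(
+                thread_id="thread_123",  # hypothetical thread ID
+                assistant_id="asst_456",  # hypothetical agent ID
+                sleep_interval=2,  # poll the service every two seconds
+            )
+            print(run.status)  # terminal status once polling finishes
+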
+ :rtype: ~azure.ai.client.models.ThreadRun or ~azure.ai.client.models.AgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Create and initiate the run with additional parameters
+ run = self.create_run(
+ thread_id=thread_id,
+ assistant_id=assistant_id,
+ model=model,
+ instructions=instructions,
+ additional_instructions=additional_instructions,
+ additional_messages=additional_messages,
+ tools=tools,
+ stream=stream,
+ temperature=temperature,
+ top_p=top_p,
+ max_prompt_tokens=max_prompt_tokens,
+ max_completion_tokens=max_completion_tokens,
+ truncation_strategy=truncation_strategy,
+ tool_choice=tool_choice,
+ response_format=response_format,
+ metadata=metadata,
+ event_handler=event_handler,
+ **kwargs,
+ )
+
+ # Return the run stream object if streaming is enabled
+ if stream:
+ return run
+
+ # Monitor and process the run status
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ time.sleep(sleep_interval)
+ run = self.get_run(thread_id=thread_id, run_id=run.id)
+
+ if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ logging.warning("No tool calls provided - cancelling run")
+ self.cancel_run(thread_id=thread_id, run_id=run.id)
+ break
+
+ toolset = self.get_toolset()
+ if toolset:
+ tool_outputs = toolset.execute_tool_calls(tool_calls)
+ else:
+ raise ValueError("Toolset is not available in the client.")
+
+ logging.info("Tool outputs: %s", tool_outputs)
+ if tool_outputs:
+ self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
+
+ logging.info("Current run status: %s", run.status)
+
+ return run
+
+ @overload
+ def submit_tool_outputs_to_run(
+ self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> Union[_models.ThreadRun, _models.AgentRunStream]:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ *,
+ tool_outputs: List[_models.ToolOutput],
+ content_type: str = "application/json",
+ stream: Optional[bool] = None,
+ event_handler: Optional[_models.AgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> Union[_models.ThreadRun, _models.AgentRunStream]:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword stream: Default value is None.
+ :paramtype stream: bool
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def submit_tool_outputs_to_run(
+ self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> Union[_models.ThreadRun, _models.AgentRunStream]:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ stream: Optional[bool] = None,
+ event_handler: Optional[_models.AgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> Union[_models.ThreadRun, _models.AgentRunStream]:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required if `tool_outputs` is not
+ provided.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: Required if `body` is not provided.
+ :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
+ :keyword stream: If ``true``\\ , returns a stream of events that happen during the
+ Run as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream: bool
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+ :param kwargs: Additional parameters.
+ :return: ThreadRun or AgentRunStream. The ThreadRun if streaming is disabled, otherwise the
+ AgentRunStream object.
+ :rtype: ~azure.ai.client.models.ThreadRun or ~azure.ai.client.models.AgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ elif tool_outputs is not _Unset:
+ response = super().submit_tool_outputs_to_run(
+ thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=stream, stream=stream, **kwargs
+ )
+
+ elif isinstance(body, io.IOBase):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ # If streaming is enabled, return the custom stream object
+ if stream:
+ return _models.AgentRunStream(response, event_handler)
+ else:
+ return response
+
+ @overload
+ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param body: Required.
+ :type body: JSON
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def upload_file(
+ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :keyword file: Required.
+ :paramtype file: ~azure.ai.client._vendor.FileType
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
+ :keyword filename: Default value is None.
+ :paramtype filename: str
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param file_path: Required.
+ :type file_path: str
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def upload_file(
+ self,
+ body: Union[JSON, None] = None,
+ *,
+ file: Union[FileType, None] = None,
+ file_path: Optional[str] = None,
+ purpose: Optional[Union[str, _models.FilePurpose]] = None,
+ filename: Optional[str] = None,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """
+ Uploads a file for use by other operations, delegating to the generated operations.
+
+ :param body: JSON. Required if `file` and `file_path` are not provided.
+ :param file: File content. Required if `body` and `file_path` are not provided.
+ :param file_path: Path to the file. Required if `body` and `file` are not provided.
+ :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required if `body` is not provided.
+ :param filename: The name of the file.
+ :param kwargs: Additional parameters.
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.client.models.OpenAIFile
+ :raises FileNotFoundError: If the file_path is invalid.
+ :raises IOError: If there are issues with reading the file.
+ :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors.
+ """
+ # A positional string is a file path (see the file_path overload), not a JSON body
+ if isinstance(body, str) and file_path is None:
+ body, file_path = None, body
+
+ if body is not None:
+ return super().upload_file(body=body, **kwargs)
+
+ if isinstance(purpose, FilePurpose):
+ purpose = purpose.value
+
+ if file is not None and purpose is not None:
+ return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+
+ if file_path is not None and purpose is not None:
+ if not os.path.isfile(file_path):
+ raise FileNotFoundError(f"The file path provided does not exist: {file_path}")
+
+ try:
+ with open(file_path, "rb") as f:
+ content = f.read()
+
+ # Determine filename and create correct FileType
+ base_filename = filename or os.path.basename(file_path)
+ file_content: FileType = (base_filename, content)
+
+ return super().upload_file(file=file_content, purpose=purpose, **kwargs)
+ except IOError as e:
+ raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") from e
+
+ raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")
+
+ __all__: List[str] = [
+ "AgentsOperations",
"EndpointsOperations",
- "InferenceOperations"
+ "InferenceOperations",
] # Add all objects you want publicly available to users at this package level
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py
new file mode 100644
index 000000000000..cc587a49dc53
--- /dev/null
+++ b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py
@@ -0,0 +1,606 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+import pytest
+from devtools_testutils import recorded_by_proxy
+from testpreparer import ClientTestBase, Preparer
+
+
+@pytest.mark.skip("you may need to update the auto-generated test case before run it")
+class TestAgentsOperations(ClientTestBase):
+ @Preparer()
+ @recorded_by_proxy
+ def test_agents_create_agent(self, _endpoint):
+ client = self.create_client(endpoint=_endpoint)
+ response = client.agents.create_agent(
+ body={
+ "model": "str",
+ "description": "str",
+ "instructions": "str",
+ "metadata": {"str": "str"},
+ "name": "str",
+ "response_format": "str",
+ "temperature": 0.0,
+ "tool_resources": {
+ "code_interpreter": {"file_ids": ["str"]},
+ "file_search": {"vector_store_ids": ["str"]},
+ },
+ "tools": ["tool_definition"],
+ "top_p": 0.0,
+ },
+ model="str",
+ )
+
+ # please add some check logic here by yourself
+ # ...
+
+ @Preparer()
+ @recorded_by_proxy
+ def test_agents_list_agents(self, _endpoint):
+ client = self.create_client(endpoint=_endpoint)
+ response = client.agents.list_agents()
+
+ # please add some check logic here by yourself
+ # ...
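+
+ # A hedged sketch of possible check logic: the pageable shape (a `data`
+ # list of agents carrying `id` fields) is assumed from the generated
+ # models and should be verified against azure.ai.client.models, e.g.:
+ #
+ # assert response is not None
+ # for agent in response.data:
+ # assert agent.id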
+ + @Preparer() + @recorded_by_proxy + def test_agents_get_agent(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_agent( + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_update_agent(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.update_agent( + agent_id="str", + body={ + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "model": "str", + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_delete_agent(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.delete_agent( + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_create_thread(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_thread( + body={ + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_thread(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_update_thread(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.update_thread( + thread_id="str", + body={ + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_delete_thread(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.delete_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_create_message(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_message( + thread_id="str", + body={ + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + }, + role="str", + content="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_messages(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_messages( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... 
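+
+ # A possible check sketch for list_messages, assuming ThreadMessage
+ # exposes `role` and a non-empty `content` list (verify before use):
+ #
+ # for message in response.data:
+ # assert message.role in ("user", "assistant")
+ # assert message.content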
+ + @Preparer() + @recorded_by_proxy + def test_agents_get_message(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_message( + thread_id="str", + message_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_update_message(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.update_message( + thread_id="str", + message_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_create_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_run( + thread_id="str", + body={ + "agent_id": "str", + "additional_instructions": "str", + "additional_messages": [ + { + "agent_id": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "completed_at": "2020-02-20 00:00:00", + "content": ["message_content"], + "created_at": "2020-02-20 00:00:00", + "id": "str", + "incomplete_at": "2020-02-20 00:00:00", + "incomplete_details": {"reason": "str"}, + "metadata": {"str": "str"}, + "object": "thread.message", + "role": "str", + "run_id": "str", + "status": "str", + "thread_id": "str", + } + ], + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "tool_choice": "str", + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_runs(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_runs( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_update_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.update_run( + thread_id="str", + run_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_submit_tool_outputs_to_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.submit_tool_outputs_to_run( + thread_id="str", + run_id="str", + body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, + tool_outputs=[{"output": "str", "tool_call_id": "str"}], + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_cancel_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.cancel_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... 
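+
+ # For context: the create_and_process_run convenience method patched
+ # earlier in this diff stands in for a manual polling loop like the
+ # sketch below (ids are placeholders; `time` would need importing):
+ #
+ # run = client.agents.create_run(thread_id="str", assistant_id="str")
+ # while run.status in ("queued", "in_progress", "requires_action"):
+ # time.sleep(1)
+ # run = client.agents.get_run(thread_id="str", run_id=run.id)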
+ + @Preparer() + @recorded_by_proxy + def test_agents_create_thread_and_run(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_thread_and_run( + body={ + "agent_id": "str", + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "thread": { + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + "tool_choice": "str", + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_run_step(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_run_step( + thread_id="str", + run_id="str", + step_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_run_steps(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_run_steps( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_files(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_files() + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_upload_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.upload_file( + body={"file": "filetype", "purpose": "str", "filename": "str"}, + file="filetype", + purpose="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_delete_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.delete_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_file_content(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_file_content( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_vector_stores(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_vector_stores() + + # please add some check logic here by yourself + # ... 
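+
+ # Relating back to the upload_file overloads patched earlier in this diff:
+ # the file-path form reads the file and derives the filename. A sketch
+ # only; the path and purpose values here are placeholders:
+ #
+ # uploaded = client.agents.upload_file(file_path="./sample.txt", purpose="assistants")
+ # assert uploaded.id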
+ + @Preparer() + @recorded_by_proxy + def test_agents_create_vector_store(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_vector_store( + body={ + "chunking_strategy": "vector_store_chunking_strategy_request", + "expires_after": {"anchor": "str", "days": 0}, + "file_ids": ["str"], + "metadata": {"str": "str"}, + "name": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_vector_store(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_modify_vector_store(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.modify_vector_store( + vector_store_id="str", + body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_delete_vector_store(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.delete_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_vector_store_files(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_vector_store_files( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_create_vector_store_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_vector_store_file( + vector_store_id="str", + body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_vector_store_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_delete_vector_store_file(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.delete_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_create_vector_store_file_batch(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.create_vector_store_file_batch( + vector_store_id="str", + body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_ids=["str"], + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_get_vector_store_file_batch(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.get_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... 
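+
+ # A tentative status check for the file batch; the status values are
+ # assumed from the vector store batch model and should be confirmed:
+ #
+ # assert response.status in ("in_progress", "completed", "cancelled", "failed")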
+ + @Preparer() + @recorded_by_proxy + def test_agents_cancel_vector_store_file_batch(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.cancel_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy + def test_agents_list_vector_store_file_batch_files(self, _endpoint): + client = self.create_client(endpoint=_endpoint) + response = client.agents.list_vector_store_file_batch_files( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py new file mode 100644 index 000000000000..56536356a3ae --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py @@ -0,0 +1,607 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import Preparer +from testpreparer_async import ClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestAgentsOperationsAsync(ClientTestBaseAsync): + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_agent(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_agent( + body={ + "model": "str", + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + model="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_agents(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_agents() + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_agent(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_agent( + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_update_agent(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.update_agent( + agent_id="str", + body={ + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "model": "str", + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + ) + + # please add some check logic here by yourself + # ... 
+ + @Preparer() + @recorded_by_proxy_async + async def test_agents_delete_agent(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.delete_agent( + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_thread(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_thread( + body={ + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_thread(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_update_thread(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.update_thread( + thread_id="str", + body={ + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_delete_thread(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.delete_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_message(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_message( + thread_id="str", + body={ + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + }, + role="str", + content="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_messages(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_messages( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_message(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_message( + thread_id="str", + message_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_update_message(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.update_message( + thread_id="str", + message_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... 
+ + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_run( + thread_id="str", + body={ + "agent_id": "str", + "additional_instructions": "str", + "additional_messages": [ + { + "agent_id": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "completed_at": "2020-02-20 00:00:00", + "content": ["message_content"], + "created_at": "2020-02-20 00:00:00", + "id": "str", + "incomplete_at": "2020-02-20 00:00:00", + "incomplete_details": {"reason": "str"}, + "metadata": {"str": "str"}, + "object": "thread.message", + "role": "str", + "run_id": "str", + "status": "str", + "thread_id": "str", + } + ], + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "tool_choice": "str", + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_runs(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_runs( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_update_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.update_run( + thread_id="str", + run_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_submit_tool_outputs_to_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.submit_tool_outputs_to_run( + thread_id="str", + run_id="str", + body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, + tool_outputs=[{"output": "str", "tool_call_id": "str"}], + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_cancel_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.cancel_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... 
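+
+ # An async counterpart to the polling sketch in test_agents_operations.py,
+ # assuming the same run statuses; note asyncio.sleep (requires
+ # `import asyncio`) rather than time.sleep, so the event loop stays free:
+ #
+ # run = await client.agents.create_run(thread_id="str", assistant_id="str")
+ # while run.status in ("queued", "in_progress", "requires_action"):
+ # await asyncio.sleep(1)
+ # run = await client.agents.get_run(thread_id="str", run_id=run.id)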
+ + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_thread_and_run(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_thread_and_run( + body={ + "agent_id": "str", + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "thread": { + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + "tool_choice": "str", + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + agent_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_run_step(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_run_step( + thread_id="str", + run_id="str", + step_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_run_steps(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_run_steps( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_files(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_files() + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_upload_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.upload_file( + body={"file": "filetype", "purpose": "str", "filename": "str"}, + file="filetype", + purpose="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_delete_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.delete_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_file_content(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_file_content( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_vector_stores(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_vector_stores() + + # please add some check logic here by yourself + # ... 
+ + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_vector_store( + body={ + "chunking_strategy": "vector_store_chunking_strategy_request", + "expires_after": {"anchor": "str", "days": 0}, + "file_ids": ["str"], + "metadata": {"str": "str"}, + "name": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_modify_vector_store(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.modify_vector_store( + vector_store_id="str", + body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_delete_vector_store(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.delete_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_vector_store_files(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_vector_store_files( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_vector_store_file( + vector_store_id="str", + body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_delete_vector_store_file(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.delete_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store_file_batch(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.create_vector_store_file_batch( + vector_store_id="str", + body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_ids=["str"], + ) + + # please add some check logic here by yourself + # ... 
+ + @Preparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store_file_batch(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.get_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_cancel_vector_store_file_batch(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.cancel_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @Preparer() + @recorded_by_proxy_async + async def test_agents_list_vector_store_file_batch_files(self, _endpoint): + client = self.create_async_client(endpoint=_endpoint) + response = await client.agents.list_vector_store_file_batch_files( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py new file mode 100644 index 000000000000..2ef135f90409 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py @@ -0,0 +1,606 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils import recorded_by_proxy +from testpreparer import AzureAIClientTestBase, AzureAIPreparer + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestAzureAIAgentsOperations(AzureAIClientTestBase): + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_agent(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_agent( + body={ + "model": "str", + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + model="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_agents(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_agents() + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_agent(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_agent( + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_update_agent(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.update_agent( + assistant_id="str", + body={ + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "model": "str", + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_delete_agent(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.delete_agent( + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_thread(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_thread( + body={ + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_thread(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_update_thread(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.update_thread( + thread_id="str", + body={ + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_delete_thread(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.delete_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_message(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_message( + thread_id="str", + body={ + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + }, + role="str", + content="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_messages(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_messages( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_message(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_message( + thread_id="str", + message_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_update_message(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.update_message( + thread_id="str", + message_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_run( + thread_id="str", + body={ + "assistant_id": "str", + "additional_instructions": "str", + "additional_messages": [ + { + "assistant_id": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "completed_at": "2020-02-20 00:00:00", + "content": ["message_content"], + "created_at": "2020-02-20 00:00:00", + "id": "str", + "incomplete_at": "2020-02-20 00:00:00", + "incomplete_details": {"reason": "str"}, + "metadata": {"str": "str"}, + "object": "thread.message", + "role": "str", + "run_id": "str", + "status": "str", + "thread_id": "str", + } + ], + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "tool_choice": "str", + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_runs(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_runs( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_update_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.update_run( + thread_id="str", + run_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_submit_tool_outputs_to_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.submit_tool_outputs_to_run( + thread_id="str", + run_id="str", + body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, + tool_outputs=[{"output": "str", "tool_call_id": "str"}], + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_cancel_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.cancel_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... 
+ + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_thread_and_run(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_thread_and_run( + body={ + "assistant_id": "str", + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "thread": { + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + "tool_choice": "str", + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_run_step(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_run_step( + thread_id="str", + run_id="str", + step_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_run_steps(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_run_steps( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_files(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_files() + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_upload_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.upload_file( + body={"file": "filetype", "purpose": "str", "filename": "str"}, + file="filetype", + purpose="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_delete_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.delete_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_file_content(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_file_content( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_vector_stores(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_vector_stores() + + # please add some check logic here by yourself + # ... 
+ + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_vector_store(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_vector_store( + body={ + "chunking_strategy": "vector_store_chunking_strategy_request", + "expires_after": {"anchor": "str", "days": 0}, + "file_ids": ["str"], + "metadata": {"str": "str"}, + "name": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_vector_store(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_modify_vector_store(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.modify_vector_store( + vector_store_id="str", + body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_delete_vector_store(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.delete_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_vector_store_files(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_vector_store_files( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_vector_store_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_vector_store_file( + vector_store_id="str", + body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_vector_store_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_delete_vector_store_file(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.delete_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_create_vector_store_file_batch(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.create_vector_store_file_batch( + vector_store_id="str", + body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_ids=["str"], + ) + + # please add some check logic here by yourself + # ... 
+ + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_get_vector_store_file_batch(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.get_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_cancel_vector_store_file_batch(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.cancel_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy + def test_agents_list_vector_store_file_batch_files(self, azureai_endpoint): + client = self.create_client(endpoint=azureai_endpoint) + response = client.agents.list_vector_store_file_batch_files( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py new file mode 100644 index 000000000000..e893c51c1732 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py @@ -0,0 +1,607 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import pytest +from devtools_testutils.aio import recorded_by_proxy_async +from testpreparer import AzureAIPreparer +from testpreparer_async import AzureAIClientTestBaseAsync + + +@pytest.mark.skip("you may need to update the auto-generated test case before run it") +class TestAzureAIAgentsOperationsAsync(AzureAIClientTestBaseAsync): + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_agent(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_agent( + body={ + "model": "str", + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + model="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_agents(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_agents() + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_agent(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_agent( + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... 
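The async tests mirror the sync ones operation for operation. Outside the recorded test harness, the same surface would be driven with an event loop and the aio credential. A minimal sketch, assuming the async client exposes the same `from_connection_string` factory as the sync client elsewhere in this patch and supports the async context-manager protocol that generated clients normally ship with:

```python
# Minimal sketch of calling the async agents surface directly.
# The azure.ai.client.aio factory and context-manager support are assumptions.
import asyncio
import os

from azure.ai.client.aio import AzureAIClient
from azure.identity.aio import DefaultAzureCredential


async def smoke_test() -> None:
    async with DefaultAzureCredential() as credential:
        async with AzureAIClient.from_connection_string(
            credential=credential,
            connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
        ) as client:
            agent = await client.agents.create_agent(
                model="gpt-4-1106-preview",
                name="async-smoke-agent",
                instructions="You are a helpful assistant",
            )
            assert agent.id
            await client.agents.delete_agent(agent.id)


asyncio.run(smoke_test())
```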
+ + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_update_agent(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.update_agent( + assistant_id="str", + body={ + "description": "str", + "instructions": "str", + "metadata": {"str": "str"}, + "model": "str", + "name": "str", + "response_format": "str", + "temperature": 0.0, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_delete_agent(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.delete_agent( + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_thread(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_thread( + body={ + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_thread(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_update_thread(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.update_thread( + thread_id="str", + body={ + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_delete_thread(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.delete_thread( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_message(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_message( + thread_id="str", + body={ + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + }, + role="str", + content="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_messages(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_messages( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... 
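Note that the create_message stub passes both a full `body` dict and the flattened `role`/`content` keywords; the generated overloads treat these as alternatives, so real code would pick one form. A hedged sketch of the keyword form with a code-interpreter attachment follows; whether `attachments` is exposed as a keyword argument is an assumption here.

```python
# Sketch: one message with a file attached for the code interpreter tool.
# `uploaded_file` would come from a prior upload_file call; exposing
# `attachments` as a flattened keyword is an assumption.
async def post_message_with_attachment(client, thread_id: str, uploaded_file):
    return await client.agents.create_message(
        thread_id=thread_id,
        role="user",
        content="Summarize the attached file in three bullet points.",
        attachments=[
            {"file_id": uploaded_file.id, "tools": [{"type": "code_interpreter"}]}
        ],
    )
```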
+ + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_message(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_message( + thread_id="str", + message_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_update_message(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.update_message( + thread_id="str", + message_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_run( + thread_id="str", + body={ + "assistant_id": "str", + "additional_instructions": "str", + "additional_messages": [ + { + "assistant_id": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "completed_at": "2020-02-20 00:00:00", + "content": ["message_content"], + "created_at": "2020-02-20 00:00:00", + "id": "str", + "incomplete_at": "2020-02-20 00:00:00", + "incomplete_details": {"reason": "str"}, + "metadata": {"str": "str"}, + "object": "thread.message", + "role": "str", + "run_id": "str", + "status": "str", + "thread_id": "str", + } + ], + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "tool_choice": "str", + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_runs(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_runs( + thread_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_update_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.update_run( + thread_id="str", + run_id="str", + body={"metadata": {"str": "str"}}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_submit_tool_outputs_to_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.submit_tool_outputs_to_run( + thread_id="str", + run_id="str", + body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, + tool_outputs=[{"output": "str", "tool_call_id": "str"}], + ) + + # please add some check logic here by yourself + # ... 
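The generated bodies above pass the `bool` type object for the "stream" field; a real request needs an actual boolean. A minimal non-streaming call might look like the sketch below; which keyword parameters the flattened overload exposes is assumed to mirror the body fields in the stub.

```python
# Sketch: a realistic non-streaming run request. Keyword names are assumed to
# mirror the body fields shown in the generated stub above.
async def start_run(client, thread_id: str, assistant_id: str):
    return await client.agents.create_run(
        thread_id=thread_id,
        assistant_id=assistant_id,
        additional_instructions="Answer in one short paragraph.",
        max_completion_tokens=256,
        stream=False,  # an actual boolean, not the `bool` type object
    )
```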
+ + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_cancel_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.cancel_run( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_thread_and_run(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_thread_and_run( + body={ + "assistant_id": "str", + "instructions": "str", + "max_completion_tokens": 0, + "max_prompt_tokens": 0, + "metadata": {"str": "str"}, + "model": "str", + "response_format": "str", + "stream": bool, + "temperature": 0.0, + "thread": { + "messages": [ + { + "content": "str", + "role": "str", + "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], + "metadata": {"str": "str"}, + } + ], + "metadata": {"str": "str"}, + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + }, + "tool_choice": "str", + "tool_resources": { + "code_interpreter": {"file_ids": ["str"]}, + "file_search": {"vector_store_ids": ["str"]}, + }, + "tools": ["tool_definition"], + "top_p": 0.0, + "truncation_strategy": {"type": "str", "last_messages": 0}, + }, + assistant_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_run_step(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_run_step( + thread_id="str", + run_id="str", + step_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_run_steps(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_run_steps( + thread_id="str", + run_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_files(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_files() + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_upload_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.upload_file( + body={"file": "filetype", "purpose": "str", "filename": "str"}, + file="filetype", + purpose="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_delete_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.delete_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_file( + file_id="str", + ) + + # please add some check logic here by yourself + # ... 
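The get_file_content stubs (sync earlier, async just below) give no hint about the response shape. A hedged sketch of persisting retrieved content to disk; whether the call returns raw bytes or an iterator of byte chunks is an assumption, so both cases are handled.

```python
# Sketch: write retrieved file content to disk. The return type of
# get_file_content (bytes vs. an iterable of byte chunks) is an assumption.
async def save_file_content(client, file_id: str, path: str) -> None:
    content = await client.agents.get_file_content(file_id=file_id)
    with open(path, "wb") as out:
        if isinstance(content, (bytes, bytearray)):
            out.write(content)
        else:
            for chunk in content:  # assumed iterable of byte chunks
                out.write(chunk)
```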
+ + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_file_content(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_file_content( + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_vector_stores(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_vector_stores() + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_vector_store( + body={ + "chunking_strategy": "vector_store_chunking_strategy_request", + "expires_after": {"anchor": "str", "days": 0}, + "file_ids": ["str"], + "metadata": {"str": "str"}, + "name": "str", + }, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_modify_vector_store(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.modify_vector_store( + vector_store_id="str", + body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_delete_vector_store(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.delete_vector_store( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_vector_store_files(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_vector_store_files( + vector_store_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_vector_store_file( + vector_store_id="str", + body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... 
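Beyond adding files one at a time as above, the batch operations (exercised by the file-batch tests in this file) suggest a poll-until-settled pattern. A hedged sketch; the status literals mirror the OpenAI-compatible API ("in_progress", "completed", "failed") and are assumptions here.

```python
# Sketch: batch several uploaded files into a vector store and poll until
# indexing settles. Status strings are assumed, not confirmed by this patch.
import asyncio


async def index_files(client, store_id: str, file_ids: list[str]):
    batch = await client.agents.create_vector_store_file_batch(
        vector_store_id=store_id,
        file_ids=file_ids,
    )
    while batch.status == "in_progress":
        await asyncio.sleep(1)
        batch = await client.agents.get_vector_store_file_batch(
            vector_store_id=store_id, batch_id=batch.id
        )
    return batch
```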
+ + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_delete_vector_store_file(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.delete_vector_store_file( + vector_store_id="str", + file_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_create_vector_store_file_batch(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.create_vector_store_file_batch( + vector_store_id="str", + body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, + file_ids=["str"], + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_get_vector_store_file_batch(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.get_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_cancel_vector_store_file_batch(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.cancel_vector_store_file_batch( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... + + @AzureAIPreparer() + @recorded_by_proxy_async + async def test_agents_list_vector_store_file_batch_files(self, azureai_endpoint): + client = self.create_async_client(endpoint=azureai_endpoint) + response = await client.agents.list_vector_store_file_batch_files( + vector_store_id="str", + batch_id="str", + ) + + # please add some check logic here by yourself + # ... diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py new file mode 100644 index 000000000000..312239698b1f --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py @@ -0,0 +1,61 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import os, time, logging +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, + # logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging
+)
+"""
+agent = ai_client.agents.create_agent(
+    model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+)
+print("Created agent, agent ID", agent.id)
+
+thread = ai_client.agents.create_thread()
+print("Created thread, thread ID", thread.id)
+
+message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+print("Created message, message ID", message.id)
+
+run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+print("Created run, run ID", run.id)
+
+# Poll the run as long as its status is queued, in progress, or requires action
+while run.status in ["queued", "in_progress", "requires_action"]:
+    # wait for a second
+    time.sleep(1)
+    run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+    print("Run status:", run.status)
+
+print("Run completed with status:", run.status)
+
+ai_client.agents.delete_agent(agent.id)
+print("Deleted agent")
+
+messages = ai_client.agents.list_messages(thread_id=thread.id)
+print("messages:", messages)
diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
index cd94e5b58f27..3c5624009787 100644
--- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
+++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
@@ -1,6 +1,7 @@
 # These are needed for SDK logging. You can ignore them.
 import sys
 import logging
+
 logger = logging.getLogger("azure")
 logger.setLevel(logging.DEBUG)
 logger.addHandler(logging.StreamHandler(stream=sys.stdout))
@@ -20,7 +21,7 @@
 ai_client = AzureAIClient.from_connection_string(
     credential=DefaultAzureCredential(),
     connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
-    logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging
+    logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
 )
 
 # Or, you can create the Azure AI Client by giving all required parameters directly
@@ -36,24 +37,14 @@
 # You can get an authenticated azure.ai.inference chat completions client directly, if you have a serverless endpoint in your project:
 client = ai_client.inference.get_chat_completions_client()
 
-response = client.complete(
-    messages=[
-        UserMessage(content="How many feet are in a mile?")
-    ]
-)
+response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
 
 print(response.choices[0].message.content)
 
 # You can get an authenticated azure.ai.inference embeddings client directly, if you have a serverless endpoint in your project:
 client = ai_client.inference.get_embeddings_client()
 
-response = client.embed(
-    input=[
-        "first phrase",
-        "second phrase",
-        "third phrase"
-    ]
-)
+response = client.embed(input=["first phrase", "second phrase", "third phrase"])
 
 for item in response.data:
     length = len(item.embedding)
@@ -148,10 +139,7 @@
 
     if endpoint.authentication_type == AuthenticationType.API_KEY:
         print("====> Creating ChatCompletionsClient using API key authentication")
-        client = ChatCompletionsClient(
-            endpoint=endpoint.endpoint_url,
-            credential=AzureKeyCredential(endpoint.key)
-        )
+        client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key))
     elif endpoint.authentication_type == AuthenticationType.AAD:
         # MaaS models do not yet support EntraID auth
         print("====> Creating ChatCompletionsClient using Entra ID authentication")
diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml
index a941b4597271..3d64c8315a3c 100644
--- a/sdk/ai/azure-ai-client/tsp-location.yaml
+++ b/sdk/ai/azure-ai-client/tsp-location.yaml
@@ -1,4 +1,4 @@
 directory: specification/ai/Azure.AI.Client
-commit: c989add6689ea68698d8ec219203ab19269363d6
+commit: e8d6573daa810bed2d54db7da045c629731239bc
 repo: Azure/azure-rest-api-specs
 additionalDirectories:

From f23767111a28052a5c5b71159faf51955825e7f8 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Mon, 7 Oct 2024 17:59:58 -0700
Subject: [PATCH 014/138] Create separate inference samples

---
 .../samples/endpoints/sample_endpoints.py     | 75 +++++--------------
 .../sample_get_azure_openai_client.py         | 36 +++++++++
 .../sample_get_chat_completions_client.py     | 28 +++++++
 .../inference/sample_get_embeddings_client.py | 33 ++++++++
 4 files changed, 114 insertions(+), 58 deletions(-)
 create mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
 create mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
 create mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py

diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
index 3c5624009787..de2fabd8d81b 100644
--- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
+++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py
@@ -1,12 +1,3 @@
-# These are needed for SDK logging. You can ignore them.
-import sys -import logging - -logger = logging.getLogger("azure") -logger.setLevel(logging.DEBUG) -logger.addHandler(logging.StreamHandler(stream=sys.stdout)) -# End of logging setup - import os from azure.ai.client import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType @@ -17,7 +8,7 @@ from azure.core.credentials import AzureKeyCredential # Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" +# It should be in the format ";;;" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=os.environ["AI_CLIENT_CONNECTION_STRING"], @@ -25,59 +16,25 @@ ) # Or, you can create the Azure AI Client by giving all required parameters directly -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - endpoint=os.environ["AI_CLIENT_ENDPOINT"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging -) - -# You can get an authenticated azure.ai.inference chat completions client directly, if you have a serverless endpoint in your project: -client = ai_client.inference.get_chat_completions_client() - -response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - -print(response.choices[0].message.content) - -# You can get an authenticated azure.ai.inference embeddings client directly, if you have a serverless endpoint in your project: -client = ai_client.inference.get_embeddings_client() - -response = client.embed(input=["first phrase", "second phrase", "third phrase"]) - -for item in response.data: - length = len(item.embedding) - print( - f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " - f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" - ) - -# You can get an authenticated AzureOpenAI client directly, if you have an Azure OpenAI endpoint in your project: -client = ai_client.inference.get_azure_openai_client() - -response = client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], -) - -print(response.choices[0].message.content) - -# You can list all endpoints of a particular "type", with or without their credentials: +# ai_client = AzureAIClient( +# credential=DefaultAzureCredential(), +# endpoint=os.environ["AI_CLIENT_ENDPOINT"], +# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], +# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], +# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], +# logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +# ) + +# List all endpoints of a particular "type", with or without their credentials: endpoints = ai_client.endpoints.list( endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. populate_secrets=True, # Optional. 
Defaults to "False" ) -print("====> Listing all Azure Open AI endpoints:") +print("====> Listing of all Azure Open AI endpoints:") for endpoint in endpoints: print(endpoint) -# You can get the default endpoint of a particular "type" (note that since at the moment the service +# Get the default endpoint of a particular "type" (note that since at the moment the service # does not have a notion of a default endpoint, this will return the first endpoint of that type): endpoint = ai_client.endpoints.get_default( endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" @@ -85,13 +42,15 @@ print("====> Get default Azure Open AI endpoint:") print(endpoint) -# You can get an endpoint by its name: +# Get an endpoint by its name: endpoint = ai_client.endpoints.get( endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. ) -print("====> Print properties of a particular endpoint:") +print("====> Get endpoint by name:") print(endpoint) +exit() + # Here is how you would create the appropriate AOAI or Inference SDK for these endpoint if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py new file mode 100644 index 000000000000..a83e6ce67204 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py @@ -0,0 +1,36 @@ +import os +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# It should have the format ";;;" +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=os.environ["AI_CLIENT_CONNECTION_STRING"], +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +# ai_client = AzureAIClient( +# credential=DefaultAzureCredential(), +# endpoint=os.environ["AI_CLIENT_ENDPOINT"], +# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], +# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], +# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], +# ) + +# Get an authenticated OpenAI client for your default Azure OpenAI connection: +client = ai_client.inference.get_azure_openai_client() + +response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], +) + +print(response.choices[0].message.content) + + diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py new file mode 100644 index 000000000000..4eb22c61e4fb --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py @@ -0,0 +1,28 @@ +import os +from azure.ai.client import AzureAIClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# It should have the format ";;;" +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=os.environ["AI_CLIENT_CONNECTION_STRING"], +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +# ai_client = AzureAIClient( +# credential=DefaultAzureCredential(), +# endpoint=os.environ["AI_CLIENT_ENDPOINT"], +# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], +# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], +# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], +# ) + +# Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: +client = ai_client.inference.get_chat_completions_client() + +response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + +print(response.choices[0].message.content) + diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py new file mode 100644 index 000000000000..926d3ccd9973 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py @@ -0,0 +1,33 @@ +import os +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# It should have the format ";;;" +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=os.environ["AI_CLIENT_CONNECTION_STRING"], +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +# ai_client = AzureAIClient( +# credential=DefaultAzureCredential(), +# endpoint=os.environ["AI_CLIENT_ENDPOINT"], +# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], +# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], +# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], +# ) + +# Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: +client = ai_client.inference.get_embeddings_client() + +response = client.embed(input=["first phrase", "second phrase", "third phrase"]) + +for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + + From 367f88ba495929b9e5f0c620e118442622c23746 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 8 Oct 2024 10:31:29 -0700 Subject: [PATCH 015/138] Move SASTokenCredential into .models --- .../azure-ai-client/azure/ai/client/_patch.py | 64 +-------------- .../azure/ai/client/models/_patch.py | 81 ++++++++++++++++--- .../azure/ai/client/operations/_patch.py | 3 +- .../tests/endpoints/unit_tests.py | 4 +- 4 files changed, 77 insertions(+), 75 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index d05ae45e0670..2868b24d475b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -6,12 +6,8 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import datetime -import logging -import base64 -import json -from typing import List, Tuple, Union, Any -from azure.core.credentials import TokenCredential, AccessToken +from typing import List, Any +from 
azure.core.credentials import TokenCredential from azure.core import PipelineClient from azure.core.pipeline import policies from ._configuration import AzureAIClientConfiguration as ClientConfiguration @@ -20,9 +16,6 @@ from ._client import AzureAIClient as ClientGenerated from .operations._patch import InferenceOperations -logger = logging.getLogger(__name__) - - class AzureAIClient(ClientGenerated): def __init__( @@ -175,61 +168,8 @@ def from_connection_string(cls, connection: str, credential: "TokenCredential", return cls(endpoint, subscription_id, resource_group_name, workspace_name, credential, **kwargs) -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - workspace_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._workspace_name = workspace_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - ai_client = ClientGenerated( - credential=self._credential, - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - workspace_name=self._workspace_name, - ) - - connection = ai_client.connections.get(connection_name=self._connection_name, populate_secrets=True) - - self._sas_token = connection.properties.credentials.sas - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. 
New token expires on %s.", self._expires_on) - - def get_token(self) -> AccessToken: - logger.debug("SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, self._expires_on.timestamp()) - - __all__: List[str] = [ "AzureAIClient", - "SASTokenCredential", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 78a4226595c5..678d857e8545 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -7,13 +7,22 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import datetime +import inspect +import json +import logging +import base64 + from typing import List -from azure.core.credentials import TokenCredential -from ._models import ConnectionsListSecretsResponse +from azure.core.credentials import TokenCredential, AccessToken from ._enums import AgentStreamEvent -from ._models import MessageDeltaChunk, ThreadRun, RunStep, ThreadMessage, RunStepDeltaChunk from ._models import ( + ConnectionsListSecretsResponse, + MessageDeltaChunk, + ThreadRun, + RunStep, + ThreadMessage, + RunStepDeltaChunk, FunctionToolDefinition, FunctionDefinition, ToolDefinition, @@ -28,8 +37,7 @@ from abc import ABC, abstractmethod from typing import AsyncIterator, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin -import inspect, json, logging - +logger = logging.getLogger(__name__) class EndpointProperties: @@ -64,6 +72,60 @@ def __str__(self): return out +class SASTokenCredential(TokenCredential): + def __init__( + self, + *, + sas_token: str, + credential: TokenCredential, + subscription_id: str, + resource_group_name: str, + workspace_name: str, + connection_name: str, + ): + self._sas_token = sas_token + self._credential = credential + self._subscription_id = subscription_id + self._resource_group_name = resource_group_name + self._workspace_name = workspace_name + self._connection_name = connection_name + self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) + logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) + + @classmethod + def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: + payload = jwt_token.split(".")[1] + padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary + decoded_bytes = base64.urlsafe_b64decode(padded_payload) + decoded_str = decoded_bytes.decode("utf-8") + decoded_payload = json.loads(decoded_str) + expiration_date = decoded_payload.get("exp") + return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) + + def _refresh_token(self) -> None: + logger.debug("[SASTokenCredential._refresh_token] Enter") + from azure.ai.client import AzureAIClient + ai_client = AzureAIClient( + credential=self._credential, + endpoint = "not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. 
+            subscription_id=self._subscription_id,
+            resource_group_name=self._resource_group_name,
+            workspace_name=self._workspace_name,
+        )
+
+        connection = ai_client.endpoints.get(endpoint_name=self._connection_name, populate_secrets=True)
+
+        self._sas_token = connection.properties.credentials.sas
+        self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token)
+        logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on)
+
+    def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken:  # scopes/kwargs accepted to match the TokenCredential protocol
+        logger.debug("[SASTokenCredential.get_token] Enter")
+        if self._expires_on < datetime.datetime.now(datetime.timezone.utc):
+            self._refresh_token()
+        return AccessToken(self._sas_token, self._expires_on.timestamp())
+
+
 # Define type_map to translate Python type annotations to JSON Schema types
 type_map = {
     "str": "string",
@@ -743,15 +805,16 @@ def until_done(self) -> None:
 __all__: List[str] = [
-    "AsyncAgentEventHandler",
     "AgentEventHandler",
-    "AsyncAgentRunStream",
     "AgentRunStream",
+    "AsyncAgentEventHandler",
+    "AsyncAgentRunStream",
     "AsyncFunctionTool",
     "AsyncToolSet",
-    "FunctionTool",
-    "FileSearchTool",
     "CodeInterpreterTool",
+    "FileSearchTool",
+    "FunctionTool",
+    "SASTokenCredential",
     "Tool",
     "ToolSet",
 ]  # Add all objects you want publicly available to users at this package level
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
index 3d994f738397..134007731cbb 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
@@ -202,8 +202,7 @@ def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> Connecti
         if connection.properties.auth_type == AuthenticationType.AAD:
             return EndpointProperties(connection=connection, token_credential=self._config.credential)
         elif connection.properties.auth_type == AuthenticationType.SAS:
-            from .._patch import SASTokenCredential
-
+            from ..models._patch import SASTokenCredential
             token_credential = SASTokenCredential(
                 sas_token=connection.properties.credentials.sas,
                 credential=self._config.credential,
diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
index 07a021d58fb4..7bd10564b4d0 100644
--- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
+++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
@@ -5,7 +5,7 @@
 import sys
 import logging
 import datetime
-from azure.ai.client import SASTokenCredential
+from azure.ai.client.models import SASTokenCredential
 from azure.core.credentials import TokenCredential, AccessToken
 from azure.core.exceptions import HttpResponseError
@@ -47,7 +47,7 @@ def test_sas_token_credential_class_mocked(self, **kwargs):
     import datetime
     import time
 
-    # Create a simple JWT with 10 seconds expieration time
+    # Create a simple JWT with 10 seconds expiration time
     token_duration_sec = 5
     secret_key = "my_secret_key"
     token_duration_sec = 5

From 7bcf7055c00fe4998a91e6421fac1a2616761888 Mon Sep 17 00:00:00 2001
From: Jarno Hakulinen
Date: Tue, 8 Oct 2024 10:51:42 -0800
Subject: [PATCH 016/138] Add more agents samples (#37777)

* add more samples
* adjust sleep
* review updates
---
 .../samples/agents/sample_agents_basics.py    |  1 -
 .../samples/agents/sample_agents_functions.py | 89 ++++++++++++++++++
 .../agents/sample_agents_run_with_toolset.py  | 70 ++++++++++++++
 .../sample_agents_stream_eventhandler.py      | 94 +++++++++++++++++++
.../agents/sample_agents_stream_iteration.py | 86 +++++++++++++++++ .../samples/agents/user_functions.py | 68 ++++++++++++++ 6 files changed, 407 insertions(+), 1 deletion(-) create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/user_functions.py diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py index 312239698b1f..8c11a30de0b0 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py @@ -16,7 +16,6 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=connection_string, - # logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py new file mode 100644 index 000000000000..174ba4c98241 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -0,0 +1,89 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import os, time +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential +from azure.ai.client.models import FunctionTool +from user_functions import user_functions + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging +) +""" + +# Initialize function tool with user functions +functions = FunctionTool(functions=user_functions) + +# Create an agent and run user's request with function calls +agent = ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", tools=functions.definitions +) +print(f"Created agent, ID: {agent.id}") + +thread = ai_client.agents.create_thread() +print(f"Created thread, ID: {thread.id}") + +message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York?") +print(f"Created message, ID: {message.id}") + +run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) +print(f"Created run, ID: {run.id}") + +while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + output = functions.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + ai_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + +print(f"Run completed with status: {run.status}") + +# Delete the agent when done +ai_client.agents.delete_agent(agent.id) +print("Deleted agent") + +# Fetch and log all messages +messages = ai_client.agents.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py new file mode 100644 index 000000000000..6823de1233bf --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py @@ -0,0 +1,70 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import os +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential +from azure.ai.client.models import FunctionTool, ToolSet, CodeInterpreterTool +from user_functions import user_functions + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) +""" + +# Initialize agent toolset with user functions and code interpreter +functions = FunctionTool(user_functions) +code_interpreter = CodeInterpreterTool() + +toolset = ToolSet() +toolset.add(functions) +toolset.add(code_interpreter) + +# Create agent with toolset and process assistant run +agent = ai_client.agents.create_agent( + model="gpt-4o-mini", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset +) +print(f"Created agent, ID: {agent.id}") + +# Create thread for communication +thread = ai_client.agents.create_thread() +print(f"Created thread, ID: {thread.id}") + +# Create message to thread +message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York?") +print(f"Created message, ID: {message.id}") + +# Create and process agent run in thread with tools +run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) +print(f"Run finished with status: {run.status}") + +if run.status == "failed": + print(f"Run failed: {run.last_error}") + +# Delete the assistant when done +ai_client.agents.delete_agent(agent.id) +print("Deleted agent") + +# Fetch and log all messages +messages = ai_client.agents.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py new file mode 100644 index 000000000000..d02f64ead043 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ -0,0 +1,94 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import os +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential + +from azure.ai.client.models import ( + AgentEventHandler, + MessageDeltaTextContent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +from typing import Any + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) +""" + +class MyEventHandler(AgentEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +# Create an agent and run stream with event handler +agent = ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" +) +print(f"Created agent, agent ID {agent.id}") + +thread = ai_client.agents.create_thread() +print(f"Created thread, thread ID {thread.id}") + +message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") +print(f"Created message, message ID {message.id}") + +with ai_client.agents.create_and_process_run( + thread_id=thread.id, + assistant_id=agent.id, + stream=True, + event_handler=MyEventHandler() +) as stream: + stream.until_done() + +ai_client.agents.delete_agent(agent.id) +print("Deleted agent") + +messages = ai_client.agents.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py new file mode 100644 index 000000000000..92110a6914fe --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py @@ -0,0 +1,86 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import os +from azure.ai.client import AzureAIClient +from azure.identity import DefaultAzureCredential +from azure.ai.client.models import ( + AgentStreamEvent, + MessageDeltaTextContent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) +""" + +# Create an agent and run stream with iteration +agent = ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" +) +print(f"Created agent, ID {agent.id}") + +thread = ai_client.agents.create_thread() +print(f"Created thread, thread ID {thread.id}") + +message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") +print(f"Created message, message ID {message.id}") + +with ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: + + for event_type, event_data in stream: + + if isinstance(event_data, MessageDeltaChunk): + for content_part in event_data.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + +ai_client.agents.delete_agent(agent.id) +print("Deleted agent") + +messages = ai_client.agents.list_messages(thread_id=thread.id) +print(f"Messages: {messages}") \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/agents/user_functions.py b/sdk/ai/azure-ai-client/samples/agents/user_functions.py new file mode 100644 index 000000000000..5c787f972fd3 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/user_functions.py @@ -0,0 +1,68 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime + +# These are the user-defined functions that can be called by the agent. + +def fetch_current_datetime() -> str: + """ + Get the current time as a JSON string. + + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. 
+
+    :param str location: The location to fetch weather for.
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
+    mock_weather_data = {
+        "New York": "Sunny, 25°C",
+        "London": "Cloudy, 18°C",
+        "Tokyo": "Rainy, 22°C"
+    }
+    weather = mock_weather_data.get(location, "Weather data not available for this location.")
+    weather_json = json.dumps({"weather": weather})
+    return weather_json
+
+
+def send_email(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param str recipient: Email address of the recipient.
+    :param str subject: Subject of the email.
+    :param str body: Body content of the email.
+    :return: Confirmation message.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd use an SMTP server or an email service API.
+    # Here, we'll mock the email sending.
+    print(f"Sending email to {recipient}...")
+    print(f"Subject: {subject}")
+    print(f"Body:\n{body}")
+
+    message_json = json.dumps({"message": f"Email successfully sent to {recipient}."})
+    return message_json
+
+
+# Statically defined user functions for fast reference
+user_functions = {
+    "fetch_current_datetime": fetch_current_datetime,
+    "fetch_weather": fetch_weather,
+    "send_email": send_email
+}

From ae5a0af86325c0a23271df6281c82a493c7a2999 Mon Sep 17 00:00:00 2001
From: Ankit Singhal <30610298+singankit@users.noreply.github.com>
Date: Tue, 8 Oct 2024 13:10:24 -0700
Subject: [PATCH 017/138] Update _patch.py (#37760)

---
 sdk/ai/azure-ai-client/azure/ai/client/_patch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py
index 2868b24d475b..dafe47fac99a 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py
@@ -118,7 +118,7 @@ def __init__(
             workspace_name=workspace_name,
             credential=credential,
             api_version="2024-07-01-preview",  # TODO: Update me
-            credential_scopes=["https://management.azure.com"],  # TODO: Update once service changes are ready
+            credential_scopes=["https://ml.azure.com"],  # TODO: Update once service changes are ready
             **kwargs3,
         )
         _policies3 = kwargs3.pop("policies", None)

From b540375694fce9893b4c2353a2d03d24657c2e81 Mon Sep 17 00:00:00 2001
From: Ankit Singhal <30610298+singankit@users.noreply.github.com>
Date: Tue, 8 Oct 2024 15:06:46 -0700
Subject: [PATCH 018/138] Online Evaluation changes (#37794)

---
 .../azure/ai/client/_client.py | 3 +-
 .../azure/ai/client/_configuration.py | 1 -
 .../azure/ai/client/_model_base.py | 5 +-
 .../azure-ai-client/azure/ai/client/_patch.py | 1 +
 .../azure/ai/client/_serialization.py | 1 -
 .../azure-ai-client/azure/ai/client/_types.py | 2 -
 .../azure/ai/client/aio/_client.py | 3 +-
 .../azure/ai/client/aio/_configuration.py | 1 -
 .../ai/client/aio/operations/_operations.py | 570 ++++++++++++--
 .../azure/ai/client/models/__init__.py | 18 +-
 .../azure/ai/client/models/_enums.py | 22 +
 .../azure/ai/client/models/_models.py | 258 ++++++-
 .../azure/ai/client/models/_patch.py | 4 +-
 .../azure/ai/client/operations/__init__.py | 1 -
 .../azure/ai/client/operations/_operations.py | 694 ++++++++++++++++--
 .../azure/ai/client/operations/_patch.py | 2 +
 .../agents_cancel_run_maximum_set_gen.py | 45 ++
 ...vector_store_file_batch_minimum_set_gen.py | 45 ++
 .../agents_delete_agent_minimum_set_gen.py | 44 ++
.../agents_delete_file_minimum_set_gen.py | 44 ++ .../agents_delete_thread_minimum_set_gen.py | 44 ++ ...elete_vector_store_file_minimum_set_gen.py | 45 ++ ...nts_delete_vector_store_minimum_set_gen.py | 44 ++ .../agents_get_agent_minimum_set_gen.py | 44 ++ ...agents_get_file_content_minimum_set_gen.py | 44 ++ .../agents_get_file_minimum_set_gen.py | 44 ++ .../agents_get_message_minimum_set_gen.py | 45 ++ .../agents_get_run_maximum_set_gen.py | 45 ++ .../agents_get_run_step_minimum_set_gen.py | 46 ++ .../agents_get_thread_minimum_set_gen.py | 44 ++ ...vector_store_file_batch_minimum_set_gen.py | 45 ++ ...s_get_vector_store_file_minimum_set_gen.py | 45 ++ ...agents_get_vector_store_minimum_set_gen.py | 44 ++ .../agents_list_agents_minimum_set_gen.py | 42 ++ .../agents_list_files_minimum_set_gen.py | 42 ++ .../agents_list_messages_minimum_set_gen.py | 44 ++ .../agents_list_run_steps_minimum_set_gen.py | 45 ++ .../agents_list_runs_maximum_set_gen.py | 44 ++ ..._store_file_batch_files_minimum_set_gen.py | 45 ++ ...list_vector_store_files_minimum_set_gen.py | 44 ++ ...ents_list_vector_stores_minimum_set_gen.py | 42 ++ .../agents_update_run_maximum_set_gen.py | 46 ++ .../evaluations_create_maximum_set_gen.py | 55 ++ .../evaluations_create_minimum_set_gen.py | 47 ++ .../evaluations_get_maximum_set_gen.py | 44 ++ .../evaluations_get_minimum_set_gen.py | 44 ++ .../evaluations_list_maximum_set_gen.py | 43 ++ .../evaluations_list_minimum_set_gen.py | 43 ++ .../evaluations_update_maximum_set_gen.py | 49 ++ .../test_azure_ai_evaluations_operations.py | 75 ++ ...t_azure_ai_evaluations_operations_async.py | 75 ++ .../samples/agents/sample_agents_functions.py | 23 +- .../agents/sample_agents_run_with_toolset.py | 8 +- .../sample_agents_stream_eventhandler.py | 6 +- .../agents/sample_agents_stream_iteration.py | 2 +- .../samples/agents/user_functions.py | 13 +- .../sample_get_azure_openai_client.py | 2 - .../sample_get_chat_completions_client.py | 1 - .../inference/sample_get_embeddings_client.py | 2 - sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 60 files changed, 3083 insertions(+), 188 deletions(-) create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py create mode 100644 
sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py create mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index cb5b3efc371a..bd5efc4521b1 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -19,11 +19,10 @@ from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential -class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword +class AzureAIClient: """AzureAIClient. :ivar agents: AgentsOperations operations diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py index e6af9a8ee17e..ee10b245b611 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -13,7 +13,6 @@ from ._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py b/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py index 12ad7f29c71e..9d401b0cf012 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -636,7 +637,7 @@ def _deserialize(cls, data, exist_discriminators): return mapped_cls._deserialize(data, exist_discriminators) def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: - """Return a dict that can be JSONify using json.dump. + """Return a dict that can be turned into json using json.dump. :keyword bool exclude_readonly: Whether to remove the readonly properties. :returns: A dict JSON compatible object @@ -733,7 +734,7 @@ def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.An ) -def _get_deserialize_callable_from_annotation( # pylint: disable=R0911, R0915, R0912 +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches annotation: typing.Any, module: typing.Optional[str], rf: typing.Optional["_RestField"] = None, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index dafe47fac99a..34ef3ed53064 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -16,6 +16,7 @@ from ._client import AzureAIClient as ClientGenerated from .operations._patch import InferenceOperations + class AzureAIClient(ClientGenerated): def __init__( diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py index 01a226bd7f14..7b3074215a30 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-lines # -------------------------------------------------------------------------- # # Copyright (c) Microsoft Corporation. All rights reserved. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_types.py b/sdk/ai/azure-ai-client/azure/ai/client/_types.py index 4c06fd33de50..c438829bda41 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_types.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_types.py @@ -1,5 +1,4 @@ # coding=utf-8 -# pylint: disable=too-many-lines # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. @@ -10,7 +9,6 @@ from typing import TYPE_CHECKING, Union if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from . import models as _models from .. import models as _models AgentsApiResponseFormatOption = Union[ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 5c9645c1f9b9..53eb218b6823 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -19,11 +19,10 @@ from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential -class AzureAIClient: # pylint: disable=client-accepts-api-version-keyword +class AzureAIClient: """AzureAIClient. 
:ivar agents: AgentsOperations operations diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py index 7ba861c532db..eb7879780472 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -13,7 +13,6 @@ from .._version import VERSION if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index 9d8926ce14a4..f29d76e51ded 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, Type, TypeVar, Union, overload +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload import urllib.parse from azure.core.async_paging import AsyncItemPaged, AsyncList @@ -76,18 +76,22 @@ build_endpoints_list_request, build_endpoints_list_secrets_request, build_evaluations_create_request, + build_evaluations_create_schedule_request, + build_evaluations_delete_schedule_request, build_evaluations_get_request, + build_evaluations_get_schedule_request, build_evaluations_list_request, + build_evaluations_list_schedule_evaluations_request, + build_evaluations_list_schedules_request, build_evaluations_update_request, ) if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + from typing import MutableMapping # type: ignore if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from ... 
import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() @@ -266,7 +270,7 @@ async def create_agent( :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -379,7 +383,7 @@ async def list_agents( :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -447,7 +451,7 @@ async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -672,7 +676,7 @@ async def update_agent( :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -761,7 +765,7 @@ async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentD :rtype: ~azure.ai.client.models.AgentDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -914,7 +918,7 @@ async def create_thread( :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -991,7 +995,7 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1146,7 +1150,7 @@ async def update_thread( :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1224,7 +1228,7 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe :rtype: ~azure.ai.client.models.ThreadDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: 
ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1403,7 +1407,7 @@ async def create_message( :rtype: ~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1514,7 +1518,7 @@ async def list_messages( :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1586,7 +1590,7 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ :rtype: ~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1738,7 +1742,7 @@ async def update_message( :rtype: ~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2045,7 +2049,7 @@ async def create_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2167,7 +2171,7 @@ async def list_runs( :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2238,7 +2242,7 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2390,7 +2394,7 @@ async def update_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2565,7 +2569,7 @@ async def submit_tool_outputs_to_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2648,7 +2652,7 @@ 
async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2930,7 +2934,7 @@ async def create_thread_and_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3029,7 +3033,7 @@ async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs :rtype: ~azure.ai.client.models.RunStep :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3125,7 +3129,7 @@ async def list_run_steps( :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3199,7 +3203,7 @@ async def list_files( :rtype: ~azure.ai.client.models.FileListResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3312,7 +3316,7 @@ async def upload_file( :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3390,7 +3394,7 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion :rtype: ~azure.ai.client.models.FileDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3455,7 +3459,7 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3520,7 +3524,7 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileCon :rtype: ~azure.ai.client.models.FileContentResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 
409: ResourceExistsError, @@ -3608,7 +3612,7 @@ async def list_vector_stores( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3770,7 +3774,7 @@ async def create_vector_store( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3853,7 +3857,7 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4006,7 +4010,7 @@ async def modify_vector_store( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4085,7 +4089,7 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4180,7 +4184,7 @@ async def list_vector_store_files( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4328,7 +4332,7 @@ async def create_vector_store_file( :rtype: ~azure.ai.client.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4410,7 +4414,7 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar :rtype: ~azure.ai.client.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4483,7 +4487,7 @@ async def delete_vector_store_file( :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: 
ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4627,7 +4631,7 @@ async def create_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4711,7 +4715,7 @@ async def get_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4782,7 +4786,7 @@ async def cancel_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4881,7 +4885,7 @@ async def list_vector_store_file_batch_files( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4968,7 +4972,7 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4979,9 +4983,7 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop( # pylint: disable=protected-access - "cls", None - ) + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) _request = build_endpoints_list_request( api_version=self._config.api_version, @@ -5083,7 +5085,7 @@ async def _list_secrets( # pylint: disable=protected-access :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5095,9 +5097,7 @@ async def _list_secrets( # pylint: disable=protected-access _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop( # pylint: disable=protected-access - "cls", None - ) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) if body is _Unset: if connection_name is _Unset: @@ -5248,7 +5248,7 @@ async def create(self, evaluation: 
Union[_models.Evaluation, JSON, IO[bytes]], * :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5332,7 +5332,7 @@ def list( maxpagesize = kwargs.pop("maxpagesize", None) cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5492,7 +5492,7 @@ async def update( :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5567,7 +5567,7 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5621,3 +5621,461 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + async def create_schedule( + self, body: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: ~azure.ai.client.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_schedule( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_schedule( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_schedule( + self, body: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Is one of the following types: + EvaluationSchedule, JSON, IO[bytes] Required. + :type body: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_schedule_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Get an evaluation schedule along with runs. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedules( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.EvaluationSchedule"]: + """List evaluation schedules. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. 
+ :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedules_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace + def list_schedule_evaluations( + self, id: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: 
Any + ) -> AsyncIterable["_models.Evaluation"]: + """List evaluations under a schedule. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_evaluations_request( + id=id, + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_schedule(self, id: str, **kwargs: Any) -> None: + """Delete an evaluation schedule. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_evaluations_delete_schedule_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index 9377c96de006..61340ac38b60 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -17,6 +17,7 @@ from ._models import CodeInterpreterToolResource from ._models import Dataset from ._models import Evaluation +from ._models import EvaluationSchedule from ._models import EvaluatorConfiguration from ._models import FileContentResponse from ._models import FileDeletionStatus @@ -59,6 +60,8 @@ from ._models import OpenAIPageableListOfThreadRun from ._models import OpenAIPageableListOfVectorStore from ._models import OpenAIPageableListOfVectorStoreFile +from ._models import Recurrence +from ._models import RecurrenceSchedule from ._models import RequiredAction from ._models import RequiredFunctionToolCall from ._models import RequiredFunctionToolCallDetails @@ -98,6 +101,7 @@ from ._models import RunStepMessageCreationReference from ._models import RunStepToolCall from ._models import RunStepToolCallDetails +from ._models import SamplingStrategy from ._models import SubmitToolOutputsAction from ._models import SubmitToolOutputsDetails from ._models import SystemData @@ -138,6 +142,7 @@ from ._enums import ErrorEvent from ._enums import FilePurpose from ._enums import FileState +from ._enums import Frequency from ._enums import IncompleteRunDetails from ._enums import ListSortOrder from ._enums import 
MessageIncompleteDetailsReason @@ -160,12 +165,10 @@ from ._enums import VectorStoreFileStatus from ._enums import VectorStoreFileStatusFilter from ._enums import VectorStoreStatus +from ._enums import WeekDays from ._patch import __all__ as _patch_all from ._patch import * # pylint: disable=unused-wildcard-import from ._patch import patch_sdk as _patch_sdk -from ._models import CredentialsSASAuth -from ._enums import AuthenticationType -from ._enums import EndpointType __all__ = [ "Agent", @@ -175,13 +178,11 @@ "AgentsApiResponseFormat", "AgentsNamedToolChoice", "AppInsightsConfiguration", - "AuthenticationType", "CodeInterpreterToolDefinition", "CodeInterpreterToolResource", - "CredentialsSASAuth", "Dataset", - "EndpointType", "Evaluation", + "EvaluationSchedule", "EvaluatorConfiguration", "FileContentResponse", "FileDeletionStatus", @@ -224,6 +225,8 @@ "OpenAIPageableListOfThreadRun", "OpenAIPageableListOfVectorStore", "OpenAIPageableListOfVectorStoreFile", + "Recurrence", + "RecurrenceSchedule", "RequiredAction", "RequiredFunctionToolCall", "RequiredFunctionToolCallDetails", @@ -263,6 +266,7 @@ "RunStepMessageCreationReference", "RunStepToolCall", "RunStepToolCallDetails", + "SamplingStrategy", "SubmitToolOutputsAction", "SubmitToolOutputsDetails", "SystemData", @@ -302,6 +306,7 @@ "ErrorEvent", "FilePurpose", "FileState", + "Frequency", "IncompleteRunDetails", "ListSortOrder", "MessageIncompleteDetailsReason", @@ -324,6 +329,7 @@ "VectorStoreFileStatus", "VectorStoreFileStatusFilter", "VectorStoreStatus", + "WeekDays", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index 76f30fcf6f85..a47264fce831 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -207,6 +207,16 @@ class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): terminal state.""" +class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Frequency of the schedule - day, week, month, hour, minute.""" + + MONTH = "Month" + WEEK = "Week" + DAY = "Day" + HOUR = "Hour" + MINUTE = "Minute" + + class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. @@ -479,3 +489,15 @@ class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): """in_progress status indicates that this vector store is still processing files.""" COMPLETED = "completed" """completed status indicates that this vector store is ready for use.""" + + +class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """WeekDay of the schedule - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.""" + + MONDAY = "Monday" + TUESDAY = "Tuesday" + WEDNESDAY = "Wednesday" + THURSDAY = "Thursday" + FRIDAY = "Friday" + SATURDAY = "Saturday" + SUNDAY = "Sunday" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index e57be1a42b29..9382a7a409c9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -1,5 +1,5 @@ -# coding=utf-8 # pylint: disable=too-many-lines +# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
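A minimal usage sketch for the new schedule enums added above (illustrative only, not part of the patch; it assumes the azure-ai-client package built from this change is installed):

    from azure.ai.client.models import Frequency, WeekDays

    # Both enums derive from str, so members compare equal to their wire values.
    assert Frequency.WEEK == "Week"
    assert WeekDays.MONDAY == "Monday"

    # E.g., the days a weekly evaluation schedule might run on.
    schedule_days = [WeekDays.MONDAY, WeekDays.FRIDAY]
    print([day.value for day in schedule_days])  # ['Monday', 'Friday']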
# Licensed under the MIT License. See License.txt in the project root for license information. @@ -20,7 +20,6 @@ ) if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from .. import _types, models as _models @@ -368,30 +367,25 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles class InputData(_model_base.Model): - """Abstract data class. + """Abstract data class for input data configuration. You probably want to use the sub-classes and not this class directly. Known sub-classes are: AppInsightsConfiguration, Dataset - :ivar type: Discriminator property for InputData. Required. Default value is None. + :ivar type: Type of the data. Required. Default value is None. :vartype type: str - :ivar id: Evaluation input data. Required. - :vartype id: str """ __mapping__: Dict[str, _model_base.Model] = {} type: str = rest_discriminator(name="type") - """Discriminator property for InputData. Required. Default value is None.""" - id: str = rest_field(name="Uri") - """Evaluation input data. Required.""" + """Type of the data. Required. Default value is None.""" @overload def __init__( self, *, type: str, - id: str, # pylint: disable=redefined-builtin ): ... @overload @@ -408,31 +402,35 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles class AppInsightsConfiguration(InputData, discriminator="app_insights"): """Data Source for Application Insight. + Readonly variables are only populated by the server, and will be ignored when sending a request. + - :ivar id: Evaluation input data. Required. - :vartype id: str :ivar type: Required. Default value is "app_insights". :vartype type: str - :ivar connection_string: Application Insight connection string. Required. - :vartype connection_string: str - :ivar query: Query to fetch data. Required. + :ivar resource_id: Log Analytics workspace resource ID associated with Application Insights. Required. + :vartype resource_id: str + :ivar query: Query to fetch the data. Required. :vartype query: str + :ivar service_name: Service name. Required. + :vartype service_name: str """ - type: Literal["app_insights"] = rest_discriminator(name="type") # type: ignore + type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"app_insights\".""" - connection_string: str = rest_field(name="connectionString") - """Application Insight connection string. Required.""" + resource_id: str = rest_field(name="resourceId") + """Log Analytics workspace resource ID associated with Application Insights. Required.""" query: str = rest_field() - """Query to fetch data. Required.""" + """Query to fetch the data. Required.""" + service_name: str = rest_field(name="serviceName") + """Service name. Required.""" @overload def __init__( self, *, - id: str, # pylint: disable=redefined-builtin - connection_string: str, + resource_id: str, query: str, + service_name: str, ): ... @overload @@ -694,14 +692,16 @@ class Dataset(InputData, discriminator="dataset"): Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Evaluation input data. Required. - :vartype id: str :ivar type: Required. Default value is "dataset". :vartype type: str + :ivar id: Evaluation input data. Required. + :vartype id: str """ type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"dataset\".""" + id: str = rest_field(name="Uri") + """Evaluation input data. 
Required.""" @overload def __init__( @@ -731,8 +731,8 @@ class Evaluation(_model_base.Model): :vartype id: str :ivar data: Data for evaluation. Required. :vartype data: ~azure.ai.client.models.InputData - :ivar display_name: Update stage to 'Archive' to archive the asset. Default is Development, - which means the asset is under development. + :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI + Studio. It does not need to be unique. :vartype display_name: str :ivar description: Description of the evaluation. It can be used to store additional information about the evaluation and is mutable. @@ -755,8 +755,8 @@ class Evaluation(_model_base.Model): data: "_models.InputData" = rest_field() """Data for evaluation. Required.""" display_name: Optional[str] = rest_field(name="displayName") - """Update stage to 'Archive' to archive the asset. Default is Development, which means the asset - is under development.""" + """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need + to be unique.""" description: Optional[str] = rest_field() """Description of the evaluation. It can be used to store additional information about the evaluation and is mutable.""" @@ -796,6 +796,96 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instance-attributes + """Evaluation Schedule Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: Identifier of the evaluation. + :vartype id: str + :ivar data: Data for evaluation. Required. + :vartype data: ~azure.ai.client.models.InputData + :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI + Studio. It does not need to be unique. + :vartype display_name: str + :ivar description: Description of the evaluation. It can be used to store additional + information about the evaluation and is mutable. + :vartype description: str + :ivar system_data: Metadata containing createdBy and modifiedBy information. + :vartype system_data: ~azure.ai.client.models.SystemData + :ivar status: Status of the evaluation. It is set by service and is read-only. + :vartype status: str + :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. + :vartype tags: dict[str, str] + :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a + property cannot be removed. + :vartype properties: dict[str, str] + :ivar evaluators: Evaluators to be used for the evaluation. Required. + :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration] + :ivar recurrence: Recurrence pattern for the evaluation. + :vartype recurrence: ~azure.ai.client.models.Recurrence + :ivar cron_expression: Cron expression for the evaluation. + :vartype cron_expression: str + :ivar sampling_strategy: Sampling strategy for the evaluation. Required. + :vartype sampling_strategy: ~azure.ai.client.models.SamplingStrategy + """ + + id: Optional[str] = rest_field() + """Identifier of the evaluation.""" + data: "_models.InputData" = rest_field() + """Data for evaluation. Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need + to be unique.""" + description: Optional[str] = rest_field() + """Description of the evaluation. 
It can be used to store additional information about the + evaluation and is mutable.""" + system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) + """Metadata containing createdBy and modifiedBy information.""" + status: Optional[str] = rest_field(visibility=["read"]) + """Status of the evaluation. It is set by service and is read-only.""" + tags: Optional[Dict[str, str]] = rest_field() + """Evaluation's tags. Unlike properties, tags are fully mutable.""" + properties: Optional[Dict[str, str]] = rest_field() + """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be + removed.""" + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field() + """Evaluators to be used for the evaluation. Required.""" + recurrence: Optional["_models.Recurrence"] = rest_field() + """Recurrence pattern for the evaluation.""" + cron_expression: Optional[str] = rest_field(name="cronExpression") + """Cron expression for the evaluation.""" + sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") + """Sampling strategy for the evaluation. Required.""" + + @overload + def __init__( + self, + *, + data: "_models.InputData", + evaluators: Dict[str, "_models.EvaluatorConfiguration"], + sampling_strategy: "_models.SamplingStrategy", + id: Optional[str] = None, # pylint: disable=redefined-builtin + display_name: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + properties: Optional[Dict[str, str]] = None, + recurrence: Optional["_models.Recurrence"] = None, + cron_expression: Optional[str] = None, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class EvaluatorConfiguration(_model_base.Model): """Evaluator Configuration. @@ -2434,6 +2524,91 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["list"] = "list" +class Recurrence(_model_base.Model): + """Recurrence Definition. + + + :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", + "Week", "Day", "Hour", and "Minute". + :vartype frequency: str or ~azure.ai.client.models.Frequency + :ivar interval: Specifies schedule interval in conjunction with frequency. Required. + :vartype interval: int + :ivar schedule: The recurrence schedule. Required. + :vartype schedule: ~azure.ai.client.models.RecurrenceSchedule + """ + + frequency: Union[str, "_models.Frequency"] = rest_field() + """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", + \"Hour\", and \"Minute\".""" + interval: int = rest_field() + """Specifies schedule interval in conjunction with frequency. Required.""" + schedule: "_models.RecurrenceSchedule" = rest_field() + """The recurrence schedule. Required.""" + + @overload + def __init__( + self, + *, + frequency: Union[str, "_models.Frequency"], + interval: int, + schedule: "_models.RecurrenceSchedule", + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RecurrenceSchedule(_model_base.Model): + """RecurrenceSchedule Definition. + + + :ivar hours: List of hours for the schedule. Required. + :vartype hours: list[int] + :ivar minutes: List of minutes for the schedule. Required. + :vartype minutes: list[int] + :ivar week_days: List of days for the schedule. Required. + :vartype week_days: list[str or ~azure.ai.client.models.WeekDays] + :ivar month_days: List of month days for the schedule. Required. + :vartype month_days: list[int] + """ + + hours: List[int] = rest_field() + """List of hours for the schedule. Required.""" + minutes: List[int] = rest_field() + """List of minutes for the schedule. Required.""" + week_days: List[Union[str, "_models.WeekDays"]] = rest_field(name="weekDays") + """List of days for the schedule. Required.""" + month_days: List[int] = rest_field(name="monthDays") + """List of month days for the schedule. Required.""" + + @overload + def __init__( + self, + *, + hours: List[int], + minutes: List[int], + week_days: List[Union[str, "_models.WeekDays"]], + month_days: List[int], + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class RequiredAction(_model_base.Model): """An abstract representation of a required action for an agent thread run to continue. @@ -3953,6 +4128,35 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) +class SamplingStrategy(_model_base.Model): + """SamplingStrategy Definition. + + + :ivar rate: Sampling rate. Required. + :vartype rate: float + """ + + rate: float = rest_field() + """Sampling rate. Required.""" + + @overload + def __init__( + self, + *, + rate: float, + ): ... + + @overload + def __init__(self, mapping: Mapping[str, Any]): + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): """The details for required tool calls that must be submitted for an agent thread run to continue. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 678d857e8545..26b197188a38 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -39,6 +39,7 @@ logger = logging.getLogger(__name__) + class EndpointProperties: def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: @@ -105,9 +106,10 @@ def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: def _refresh_token(self) -> None: logger.debug("[SASTokenCredential._refresh_token] Enter") from azure.ai.client import AzureAIClient + ai_client = AzureAIClient( credential=self._credential, - endpoint = "not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. 
http://management.azure.com is hard coded in the SDK. + endpoint="not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. subscription_id=self._subscription_id, resource_group_name=self._resource_group_name, workspace_name=self._workspace_name, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index a9e40b7a014c..8e6a46afb1f5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -18,7 +18,6 @@ "AgentsOperations", "EndpointsOperations", "EvaluationsOperations", - "InferenceOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 235a0449290d..c71ab5334762 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines,too-many-statements +# pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -9,7 +9,7 @@ from io import IOBase import json import sys -from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, Type, TypeVar, Union, overload +from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload import urllib.parse from azure.core.exceptions import ( @@ -36,10 +36,9 @@ if sys.version_info >= (3, 9): from collections.abc import MutableMapping else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports + from typing import MutableMapping # type: ignore if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports from .. 
import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() @@ -1311,6 +1310,135 @@ def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) +def build_evaluations_create_schedule_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/create" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_schedule_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_schedules_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_schedule_evaluations_request( # pylint: disable=name-too-long + id: str, *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", 
{}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{id}/runs" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_delete_schedule_request(id: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + class AgentsOperations: # pylint: disable=too-many-public-methods """ .. 
warning:: @@ -1480,7 +1608,7 @@ def create_agent( :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1593,7 +1721,7 @@ def list_agents( :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1661,7 +1789,7 @@ def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1886,7 +2014,7 @@ def update_agent( :rtype: ~azure.ai.client.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -1975,7 +2103,7 @@ def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletio :rtype: ~azure.ai.client.models.AgentDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2128,7 +2256,7 @@ def create_thread( :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2205,7 +2333,7 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2360,7 +2488,7 @@ def update_thread( :rtype: ~azure.ai.client.models.AgentThread :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2438,7 +2566,7 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion :rtype: ~azure.ai.client.models.ThreadDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2617,7 +2745,7 @@ def create_message( :rtype: 
~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2728,7 +2856,7 @@ def list_messages( :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2800,7 +2928,7 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models :rtype: ~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -2952,7 +3080,7 @@ def update_message( :rtype: ~azure.ai.client.models.ThreadMessage :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3259,7 +3387,7 @@ def create_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3381,7 +3509,7 @@ def list_runs( :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3452,7 +3580,7 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3604,7 +3732,7 @@ def update_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3779,7 +3907,7 @@ def submit_tool_outputs_to_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -3862,7 +3990,7 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: 
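+
+        Example (an illustrative sketch only; both identifiers are placeholders,
+        and ``client`` is an ``AzureAIClient`` constructed as in the generated
+        samples later in this patch):
+
+        .. code-block:: python
+
+            # Cancel a run that is still in progress; the returned ThreadRun
+            # reflects the run's state after the cancellation request.
+            run = client.agents.cancel_run(thread_id="<thread-id>", run_id="<run-id>")
+            print(run)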
""" - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4144,7 +4272,7 @@ def create_thread_and_run( :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4243,7 +4371,7 @@ def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) :rtype: ~azure.ai.client.models.RunStep :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4339,7 +4467,7 @@ def list_run_steps( :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4413,7 +4541,7 @@ def list_files( :rtype: ~azure.ai.client.models.FileListResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4526,7 +4654,7 @@ def upload_file( :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4604,7 +4732,7 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus :rtype: ~azure.ai.client.models.FileDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4669,7 +4797,7 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4734,7 +4862,7 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentRe :rtype: ~azure.ai.client.models.FileContentResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4822,7 +4950,7 @@ def list_vector_stores( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: 
MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -4984,7 +5112,7 @@ def create_vector_store( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5067,7 +5195,7 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5220,7 +5348,7 @@ def modify_vector_store( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5299,7 +5427,7 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5394,7 +5522,7 @@ def list_vector_store_files( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5542,7 +5670,7 @@ def create_vector_store_file( :rtype: ~azure.ai.client.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5624,7 +5752,7 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An :rtype: ~azure.ai.client.models.VectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5697,7 +5825,7 @@ def delete_vector_store_file( :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5841,7 +5969,7 @@ def create_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - 
error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5925,7 +6053,7 @@ def get_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -5996,7 +6124,7 @@ def cancel_vector_store_file_batch( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6095,7 +6223,7 @@ def list_vector_store_file_batch_files( :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6182,7 +6310,7 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6193,9 +6321,7 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop( # pylint: disable=protected-access - "cls", None - ) + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) _request = build_endpoints_list_request( api_version=self._config.api_version, @@ -6297,7 +6423,7 @@ def _list_secrets( # pylint: disable=protected-access :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6309,9 +6435,7 @@ def _list_secrets( # pylint: disable=protected-access _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop( # pylint: disable=protected-access - "cls", None - ) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) if body is _Unset: if connection_name is _Unset: @@ -6460,7 +6584,7 @@ def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: 
ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6544,7 +6668,7 @@ def list( maxpagesize = kwargs.pop("maxpagesize", None) cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6704,7 +6828,7 @@ def update( :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6779,7 +6903,7 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: """ - error_map: MutableMapping[int, Type[HttpResponseError]] = { # pylint: disable=unsubscriptable-object + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError, @@ -6833,3 +6957,461 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @overload + def create_schedule( + self, body: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: ~azure.ai.client.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_schedule( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_schedule( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. + + :param body: Properties of Evaluation Schedule. Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_schedule( + self, body: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Creates an evaluation schedule. 
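+
+        Example (an illustrative sketch only: the data asset URI is a placeholder,
+        ``evaluator_config`` stands in for an ``EvaluatorConfiguration`` built
+        elsewhere, and the client is assumed to expose these operations as
+        ``client.evaluations``):
+
+        .. code-block:: python
+
+            from azure.ai.client.models import (
+                Dataset,
+                EvaluationSchedule,
+                Frequency,
+                Recurrence,
+                RecurrenceSchedule,
+                SamplingStrategy,
+                WeekDays,
+            )
+
+            schedule = EvaluationSchedule(
+                data=Dataset(id="<data-asset-uri>"),  # placeholder data asset URI
+                evaluators={"relevance": evaluator_config},
+                sampling_strategy=SamplingStrategy(rate=0.2),  # sample 20% of the data
+                recurrence=Recurrence(
+                    frequency=Frequency.WEEK,
+                    interval=1,  # run once every week
+                    schedule=RecurrenceSchedule(
+                        # month_days is required; an empty list is assumed to be
+                        # accepted when scheduling by weekday only.
+                        hours=[9], minutes=[0], week_days=[WeekDays.MONDAY], month_days=[]
+                    ),
+                ),
+            )
+            created = client.evaluations.create_schedule(schedule)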
+ + :param body: Properties of Evaluation Schedule. Is one of the following types: + EvaluationSchedule, JSON, IO[bytes] Required. + :type body: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_schedule_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Get an evaluation schedule along with runs. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedules( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.EvaluationSchedule"]: + """List evaluation schedules. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. 
+ :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedules_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def list_schedule_evaluations( + self, id: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> 
Iterable["_models.Evaluation"]: + """List evaluations under a schedule. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_evaluations_request( + id=id, + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url( + "self._config.workspace_name", self._config.workspace_name, "str" + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def delete_schedule(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Delete an evaluation schedule. + + :param id: Identifier of the evaluation schedule. Required. + :type id: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_evaluations_delete_schedule_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 134007731cbb..38383ad48d89 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -203,6 +204,7 @@ def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> Connecti return EndpointProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ..models._patch import SASTokenCredential + token_credential = SASTokenCredential( sas_token=connection.properties.credentials.sas, credential=self._config.credential, diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py new file mode 100644 index 000000000000..0fc09fadab61 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_cancel_run_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.cancel_run( + thread_id="sxximmrzssszbrz", + run_id="reoghmjcd", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_CancelRun_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py new file mode 100644 index 000000000000..3108e9a4632c --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_cancel_vector_store_file_batch_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.cancel_vector_store_file_batch( + vector_store_id="esqgxemsdquv", + batch_id="ukjhmcvwhahdrhhiiyortbbjlhx", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_CancelVectorStoreFileBatch_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py new file mode 100644 index 000000000000..5eedea350f06 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_delete_agent_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.delete_agent( + assistant_id="gjbkohvdtcvgmgubguj", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_DeleteAgent_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py new file mode 100644 index 000000000000..d027aed6ae44 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_delete_file_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.delete_file( + file_id="flyag", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_DeleteFile_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py new file mode 100644 index 000000000000..405b18477a63 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_delete_thread_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.delete_thread( + thread_id="zwmmodqpcvxghsghkjw", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_DeleteThread_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py new file mode 100644 index 000000000000..92f956c6ff5f --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_delete_vector_store_file_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.delete_vector_store_file( + vector_store_id="hbnrqrcvbsowbuksdrtcqi", + file_id="vzshxliuznhftv", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_DeleteVectorStoreFile_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py new file mode 100644 index 000000000000..11417b53fa64 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_delete_vector_store_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.delete_vector_store( + vector_store_id="sshdziwghotwucptzhzgdxpggsedy", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_DeleteVectorStore_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py new file mode 100644 index 000000000000..5163c2d9357b --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_agent_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_agent( + assistant_id="aorarcltzoneckqmfoluejbhgbm", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetAgent_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py new file mode 100644 index 000000000000..c32dfdd03333 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_file_content_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_file_content( + file_id="oudalhdmazgj", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetFileContent_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py new file mode 100644 index 000000000000..4f657dffaa2b --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_file_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_file( + file_id="ahlwbnjjg", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetFile_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py new file mode 100644 index 000000000000..7f014bca5159 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_message_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_message( + thread_id="secglofbhaocemzzskoeatl", + message_id="axpvtnmnjmpctkmnoswam", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetMessage_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py new file mode 100644 index 000000000000..c939bf8d423b --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_run_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_run( + thread_id="ilcsibdqbvldqyvmbjjtvirntap", + run_id="scpdacucqpuoqjihekkeolfpdyeea", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetRun_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py new file mode 100644 index 000000000000..747950b6e57f --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_run_step_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_run_step( + thread_id="jwopmhvryvcpltxhimyrvkcwel", + run_id="echizvrhsigfce", + step_id="kc", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetRunStep_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py new file mode 100644 index 000000000000..e549f5ee4f80 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_thread_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_thread( + thread_id="tyfreqamibskuzfoyo", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetThread_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py new file mode 100644 index 000000000000..d1084d004d10 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_vector_store_file_batch_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_vector_store_file_batch( + vector_store_id="lbs", + batch_id="zxqbzpge", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStoreFileBatch_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py new file mode 100644 index 000000000000..ed62cc06a203 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_vector_store_file_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_vector_store_file( + vector_store_id="tcpppckhbcqnmxcpqnsdhmocbfkvw", + file_id="swmxwhxjbntnayymmrrocysdsrio", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStoreFile_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py new file mode 100644 index 000000000000..6fa3b60a5e04 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_get_vector_store_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.get_vector_store( + vector_store_id="imldyfrenvwrdahxomrvypghklgzfm", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStore_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py new file mode 100644 index 000000000000..88cf1458e1ce --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_agents_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_agents() + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListAgents_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py new file mode 100644 index 000000000000..81f7cd719fe5 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_files_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_files() + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListFiles_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py new file mode 100644 index 000000000000..39c7f858737f --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_messages_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_messages( + thread_id="bpo", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListMessages_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py new file mode 100644 index 000000000000..783eed5ea9ce --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_run_steps_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_run_steps( + thread_id="rgpbsplbzxqewjirwdhbcvc", + run_id="jjtdybnzsckqsid", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListRunSteps_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py new file mode 100644 index 000000000000..4827529a038e --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_runs_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_runs( + thread_id="mkbrj", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListRuns_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py new file mode 100644 index 000000000000..1b54f84a9f2d --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_vector_store_file_batch_files_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_vector_store_file_batch_files( + vector_store_id="vdelhyyzsgiavifhhzvtvqeqhhsuh", + batch_id="gffqfjubkoliaarvbyq", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStoreFileBatchFiles_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py new file mode 100644 index 000000000000..405fc7ae13d9 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_vector_store_files_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_vector_store_files( + vector_store_id="al", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStoreFiles_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py new file mode 100644 index 000000000000..b731ec0b53d4 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_list_vector_stores_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.list_vector_stores() + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStores_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py new file mode 100644 index 000000000000..a04f7a260322 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python agents_update_run_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.agents.update_run( + thread_id="rtzvfjw", + run_id="ibopwyspzczc", + body={"metadata": {"key5931": "ojeukdviplvt"}}, + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Agents_UpdateRun_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py new file mode 100644 index 000000000000..86f5b25179f9 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python evaluations_create_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.evaluations.create( + evaluation={ + "data": {"Uri": "bzelztdbitxoxsrqlthlnacjssata", "type": "Evaluations.InputData"}, + "description": "dfydblbzvilyvhdtqo", + "displayName": "lresoznoqbpwvsummatfyc", + "evaluators": { + "key4462": {"dataMapping": {"key6077": "rkegahkqoagtfoxuxizgo"}, "id": "bodhxfmbqquu", "initParams": {}} + }, + "id": "qfkteoypwndeirixziosai", + "properties": {"key6705": "eooztvyhcibkecr"}, + "systemData": {}, + "tags": {"key9950": "umaulvau"}, + }, + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Evaluations_Create_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py new file mode 100644 index 000000000000..b74d321cf74d --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python evaluations_create_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.evaluations.create( + evaluation={ + "data": {"Uri": "bzelztdbitxoxsrqlthlnacjssata", "type": "Evaluations.InputData"}, + "evaluators": {}, + }, + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Evaluations_Create_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py new file mode 100644 index 000000000000..a20226495dcf --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python evaluations_get_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.evaluations.get( + id="93-", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Evaluations_Get_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py new file mode 100644 index 000000000000..33bd9bc4aede --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python evaluations_get_minimum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.evaluations.get( + id="0_0", + ) + print(response) + + +# x-ms-original-file: 2024-07-01-preview/Evaluations_Get_MinimumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py new file mode 100644 index 000000000000..e472a0399432 --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.identity import DefaultAzureCredential + +from azure.ai.client import AzureAIClient + +""" +# PREREQUISITES + pip install azure-identity + pip install azure-ai-client +# USAGE + python evaluations_list_maximum_set_gen.py + + Before run the sample, please set the values of the client ID, tenant ID and client secret + of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, + AZURE_CLIENT_SECRET. For more info about how to get the value, please see: + https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal +""" + + +def main(): + client = AzureAIClient( + endpoint="ENDPOINT", + subscription_id="SUBSCRIPTION_ID", + resource_group_name="RESOURCE_GROUP_NAME", + workspace_name="WORKSPACE_NAME", + credential=DefaultAzureCredential(), + ) + + response = client.evaluations.list() + for item in response: + print(item) + + +# x-ms-original-file: 2024-07-01-preview/Evaluations_List_MaximumSet_Gen.json +if __name__ == "__main__": + main() diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py new file mode 100644 index 000000000000..9c4f643f6f9c --- /dev/null +++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.ai.client import AzureAIClient
+
+"""
+# PREREQUISITES
+    pip install azure-identity
+    pip install azure-ai-client
+# USAGE
+    python evaluations_list_minimum_set_gen.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+    client = AzureAIClient(
+        endpoint="ENDPOINT",
+        subscription_id="SUBSCRIPTION_ID",
+        resource_group_name="RESOURCE_GROUP_NAME",
+        workspace_name="WORKSPACE_NAME",
+        credential=DefaultAzureCredential(),
+    )
+
+    response = client.evaluations.list()
+    for item in response:
+        print(item)
+
+
+# x-ms-original-file: 2024-07-01-preview/Evaluations_List_MinimumSet_Gen.json
+if __name__ == "__main__":
+    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py
new file mode 100644
index 000000000000..dcf90380de75
--- /dev/null
+++ b/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from azure.identity import DefaultAzureCredential
+
+from azure.ai.client import AzureAIClient
+
+"""
+# PREREQUISITES
+    pip install azure-identity
+    pip install azure-ai-client
+# USAGE
+    python evaluations_update_maximum_set_gen.py
+
+    Before running the sample, please set the values of the client ID, tenant ID and client secret
+    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
+    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
+    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
+"""
+
+
+def main():
+    client = AzureAIClient(
+        endpoint="ENDPOINT",
+        subscription_id="SUBSCRIPTION_ID",
+        resource_group_name="RESOURCE_GROUP_NAME",
+        workspace_name="WORKSPACE_NAME",
+        credential=DefaultAzureCredential(),
+    )
+
+    response = client.evaluations.update(
+        id="8y",
+        update_request={
+            "description": "vl",
+            "displayName": "zkystmqhvncvxnxrhahhulbui",
+            "tags": {"key6951": "mirtkcesgent"},
+        },
+    )
+    print(response)
+
+
+# x-ms-original-file: 2024-07-01-preview/Evaluations_Update_MaximumSet_Gen.json
+if __name__ == "__main__":
+    main()
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py
index a698c1815fd3..bc8f590aa3cb 100644
--- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py
+++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py
@@ -69,3 +69,78 @@ def test_evaluations_get(self, azureai_endpoint):
 
     # please add some check logic here by yourself
     # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy
+    def test_evaluations_create_schedule(self, azureai_endpoint):
+        client = self.create_client(endpoint=azureai_endpoint)
+        response = client.evaluations.create_schedule(
+            body={
+                "data": "input_data",
+                "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}},
+                "samplingStrategy": {"rate": 0.0},
+                "cronExpression": "str",
+                "description": "str",
+                "displayName": "str",
+                "id": "str",
+                "properties": {"str": "str"},
+                "recurrence": {
+                    "frequency": "str",
+                    "interval": 0,
+                    "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]},
+                },
+                "status": "str",
+                "systemData": {
+                    "createdAt": "2020-02-20 00:00:00",
+                    "createdBy": "str",
+                    "createdByType": "str",
+                    "lastModifiedAt": "2020-02-20 00:00:00",
+                },
+                "tags": {"str": "str"},
+            },
+        )
+
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy
+    def test_evaluations_get_schedule(self, azureai_endpoint):
+        client = self.create_client(endpoint=azureai_endpoint)
+        response = client.evaluations.get_schedule(
+            id="str",
+        )
+
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy
+    def test_evaluations_list_schedules(self, azureai_endpoint):
+        client = self.create_client(endpoint=azureai_endpoint)
+        response = client.evaluations.list_schedules()
+        result = [r for r in response]
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy
+    def test_evaluations_list_schedule_evaluations(self, azureai_endpoint):
+        client = self.create_client(endpoint=azureai_endpoint)
+        response = client.evaluations.list_schedule_evaluations(
+            id="str",
+        )
+        result = [r for r in response]
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy
+    def test_evaluations_delete_schedule(self, azureai_endpoint):
+        client = self.create_client(endpoint=azureai_endpoint)
+        response = client.evaluations.delete_schedule(
+            id="str",
+        )
+
+        # please add some check logic here by yourself
+        # ...
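The five schedule tests above exercise the full lifecycle of the new sync surface: create_schedule, get_schedule, list_schedules, list_schedule_evaluations, and delete_schedule. As a rough usage sketch outside the test harness (not taken from the patch itself) — assuming a client built from a connection string as in the samples later in this series, and treating the id, cron expression, evaluator entry, and data value as placeholders:

    import os
    from azure.ai.client import AzureAIClient
    from azure.identity import DefaultAzureCredential

    ai_client = AzureAIClient.from_connection_string(
        credential=DefaultAzureCredential(),
        connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
    )

    # Body shape mirrors the generated test above; whether the service honors
    # a caller-supplied id is an assumption made for this sketch.
    ai_client.evaluations.create_schedule(
        body={
            "id": "nightly-eval",
            "data": "input_data",
            "evaluators": {"relevance": {"id": "evaluator-asset-id", "dataMapping": {"query": "${data.question}"}}},
            "cronExpression": "0 0 * * *",  # placeholder: run once a day at midnight
            "displayName": "nightly-eval",
        },
    )

    # Read the schedule back, enumerate schedules and their past evaluations,
    # then clean up. The list calls return pageables that are simply iterated.
    print(ai_client.evaluations.get_schedule(id="nightly-eval"))
    for schedule in ai_client.evaluations.list_schedules():
        print(schedule)
    for evaluation in ai_client.evaluations.list_schedule_evaluations(id="nightly-eval"):
        print(evaluation)
    ai_client.evaluations.delete_schedule(id="nightly-eval")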
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py
index 7f9b753e530a..8c1e28de6539 100644
--- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py
+++ b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py
@@ -70,3 +70,78 @@ async def test_evaluations_get(self, azureai_endpoint):
 
     # please add some check logic here by yourself
     # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy_async
+    async def test_evaluations_create_schedule(self, azureai_endpoint):
+        client = self.create_async_client(endpoint=azureai_endpoint)
+        response = await client.evaluations.create_schedule(
+            body={
+                "data": "input_data",
+                "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}},
+                "samplingStrategy": {"rate": 0.0},
+                "cronExpression": "str",
+                "description": "str",
+                "displayName": "str",
+                "id": "str",
+                "properties": {"str": "str"},
+                "recurrence": {
+                    "frequency": "str",
+                    "interval": 0,
+                    "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]},
+                },
+                "status": "str",
+                "systemData": {
+                    "createdAt": "2020-02-20 00:00:00",
+                    "createdBy": "str",
+                    "createdByType": "str",
+                    "lastModifiedAt": "2020-02-20 00:00:00",
+                },
+                "tags": {"str": "str"},
+            },
+        )
+
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy_async
+    async def test_evaluations_get_schedule(self, azureai_endpoint):
+        client = self.create_async_client(endpoint=azureai_endpoint)
+        response = await client.evaluations.get_schedule(
+            id="str",
+        )
+
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy_async
+    async def test_evaluations_list_schedules(self, azureai_endpoint):
+        client = self.create_async_client(endpoint=azureai_endpoint)
+        response = client.evaluations.list_schedules()
+        result = [r async for r in response]
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy_async
+    async def test_evaluations_list_schedule_evaluations(self, azureai_endpoint):
+        client = self.create_async_client(endpoint=azureai_endpoint)
+        response = client.evaluations.list_schedule_evaluations(
+            id="str",
+        )
+        result = [r async for r in response]
+        # please add some check logic here by yourself
+        # ...
+
+    @AzureAIPreparer()
+    @recorded_by_proxy_async
+    async def test_evaluations_delete_schedule(self, azureai_endpoint):
+        client = self.create_async_client(endpoint=azureai_endpoint)
+        response = await client.evaluations.delete_schedule(
+            id="str",
+        )
+
+        # please add some check logic here by yourself
+        # ...
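The async variants mirror the sync tests one-for-one, with a pattern worth noting: the point operations (create_schedule, get_schedule, delete_schedule) are awaited, while list_schedules and list_schedule_evaluations return async pageables that are not awaited but consumed with async for — exactly what the tests above do with "[r async for r in response]". A minimal async driver, assuming the same placeholder connection string and schedule id as the sync sketch, and relying on the context-manager support added to the async client later in this series (PATCH 019):

    import asyncio
    import os
    from azure.ai.client.aio import AzureAIClient
    from azure.identity.aio import DefaultAzureCredential

    async def main():
        async with DefaultAzureCredential() as credential:
            async with AzureAIClient.from_connection_string(
                credential=credential,
                connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
            ) as ai_client:
                # Point operation: awaited.
                schedule = await ai_client.evaluations.get_schedule(id="nightly-eval")
                print(schedule)
                # Pageable: iterate with async for; the call itself is not awaited.
                async for item in ai_client.evaluations.list_schedules():
                    print(item)

    asyncio.run(main())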
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index 174ba4c98241..b30d47c651e1 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -37,14 +37,21 @@ # Create an agent and run user's request with function calls agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", tools=functions.definitions + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, ) print(f"Created agent, ID: {agent.id}") thread = ai_client.agents.create_thread() print(f"Created thread, ID: {thread.id}") -message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York?") +message = ai_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", +) print(f"Created message, ID: {message.id}") run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) @@ -65,16 +72,14 @@ for tool_call in tool_calls: output = functions.execute(tool_call) tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } + "tool_call_id": tool_call.id, + "output": output, + } tool_outputs.append(tool_output) print(f"Tool outputs: {tool_outputs}") if tool_outputs: - ai_client.agents.submit_tool_outputs_to_run( - thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs - ) + ai_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) print(f"Current run status: {run.status}") @@ -86,4 +91,4 @@ # Fetch and log all messages messages = ai_client.agents.list_messages(thread_id=thread.id) -print(f"Messages: {messages}") \ No newline at end of file +print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py index 6823de1233bf..b8e3a77777a0 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py @@ -32,7 +32,7 @@ ) """ -# Initialize agent toolset with user functions and code interpreter +# Initialize agent toolset with user functions and code interpreter functions = FunctionTool(user_functions) code_interpreter = CodeInterpreterTool() @@ -51,7 +51,11 @@ print(f"Created thread, ID: {thread.id}") # Create message to thread -message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York?") +message = ai_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", +) print(f"Created message, ID: {message.id}") # Create and process agent run in thread with tools diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py index d02f64ead043..d785f8eda61a 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ 
-41,6 +41,7 @@ ) """ + class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: @@ -80,10 +81,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Created message, message ID {message.id}") with ai_client.agents.create_and_process_run( - thread_id=thread.id, - assistant_id=agent.id, - stream=True, - event_handler=MyEventHandler() + thread_id=thread.id, assistant_id=agent.id, stream=True, event_handler=MyEventHandler() ) as stream: stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py index 92110a6914fe..185e1c775283 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py @@ -83,4 +83,4 @@ print("Deleted agent") messages = ai_client.agents.list_messages(thread_id=thread.id) -print(f"Messages: {messages}") \ No newline at end of file +print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/user_functions.py b/sdk/ai/azure-ai-client/samples/agents/user_functions.py index 5c787f972fd3..8072b1b8a944 100644 --- a/sdk/ai/azure-ai-client/samples/agents/user_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/user_functions.py @@ -8,6 +8,7 @@ # These are the user-defined functions that can be called by the agent. + def fetch_current_datetime() -> str: """ Get the current time as a JSON string. @@ -30,11 +31,7 @@ def fetch_weather(location: str) -> str: """ # In a real-world scenario, you'd integrate with a weather API. # Here, we'll mock the response. - mock_weather_data = { - "New York": "Sunny, 25°C", - "London": "Cloudy, 18°C", - "Tokyo": "Rainy, 22°C" - } + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} weather = mock_weather_data.get(location, "Weather data not available for this location.") weather_json = json.dumps({"weather": weather}) return weather_json @@ -62,7 +59,7 @@ def send_email(recipient: str, subject: str, body: str) -> str: # Statically defined user functions for fast reference user_functions = { - "fetch_current_datetime": fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email + "fetch_current_datetime": fetch_current_datetime, + "fetch_weather": fetch_weather, + "send_email": send_email, } diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py index a83e6ce67204..933df0fb3c36 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py @@ -32,5 +32,3 @@ ) print(response.choices[0].message.content) - - diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py index 4eb22c61e4fb..c6238e59b5cf 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py @@ -25,4 +25,3 @@ response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) print(response.choices[0].message.content) - diff --git 
a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py index 926d3ccd9973..7adcef7db8c0 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py @@ -29,5 +29,3 @@ f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" ) - - diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 3d64c8315a3c..2746197f6f58 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: e8d6573daa810bed2d54db7da045c629731239bc +commit: 075ca688a475e4fbbbcfc2af8aa18bd6a9ff7680 repo: Azure/azure-rest-api-specs additionalDirectories: From 72cadc33833089efe78cb914843eb9a43e4abeda Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 9 Oct 2024 10:05:03 -0700 Subject: [PATCH 019/138] Patch constructor for async AzureAIClient (#37804) --- .../azure-ai-client/azure/ai/client/_patch.py | 32 ++- .../azure/ai/client/aio/_patch.py | 183 ++++++++++++++- .../azure/ai/client/aio/operations/_patch.py | 212 +++++++++++++++++- .../azure/ai/client/operations/_patch.py | 2 +- .../async_samples/sample_endpoints_async.py | 141 ++++++++++++ .../samples/endpoints/sample_endpoints.py | 7 +- ...ample_get_chat_completions_client_async.py | 58 +++++ .../sample_get_azure_openai_client.py | 4 + .../sample_get_chat_completions_client.py | 4 + .../inference/sample_get_embeddings_client.py | 4 + 10 files changed, 633 insertions(+), 14 deletions(-) create mode 100644 sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py create mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 34ef3ed53064..1ec9faeef28e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -7,10 +7,11 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ from typing import List, Any +from typing_extensions import Self from azure.core.credentials import TokenCredential from azure.core import PipelineClient from azure.core.pipeline import policies -from ._configuration import AzureAIClientConfiguration as ClientConfiguration +from ._configuration import AzureAIClientConfiguration from ._serialization import Deserializer, Serializer from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations from ._client import AzureAIClient as ClientGenerated @@ -50,7 +51,7 @@ def __init__( # For Endpoints operations (enumerating connections, getting SAS tokens) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long - self._config1 = ClientConfiguration( + self._config1 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -77,11 +78,11 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else 
None, self._config1.http_logging_policy, ] - self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long - self._config2 = ClientConfiguration( + self._config2 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -108,11 +109,11 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, self._config2.http_logging_policy, ] - self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long - self._config3 = ClientConfiguration( + self._config3 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -139,7 +140,7 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, self._config3.http_logging_policy, ] - self._client3: PipelineClient = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) self._serialize = Serializer() self._deserialize = Deserializer() @@ -150,6 +151,23 @@ def __init__( self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) self.inference = InferenceOperations(self) + def close(self) -> None: + self._client1.close() + self._client2.close() + self._client3.close() + + def __enter__(self) -> Self: + self._client1.__enter__() + self._client2.__enter__() + self._client3.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client1.__exit__(*exc_details) + self._client2.__exit__(*exc_details) + self._client3.__exit__(*exc_details) + + @classmethod def from_connection_string(cls, connection: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py index f7dd32510333..1201648eb3f9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py @@ -6,9 +6,188 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +from typing import List, Any +from azure.core import AsyncPipelineClient +from azure.core.pipeline import policies +from typing_extensions import Self -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +from .._serialization import Deserializer, Serializer +from ._configuration import AzureAIClientConfiguration +from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from ._client import AzureAIClient as ClientGenerated +from .operations._patch import InferenceOperations + + +class 
AzureAIClient(ClientGenerated):

+    def __init__(
+        self,
+        endpoint: str,
+        subscription_id: str,
+        resource_group_name: str,
+        workspace_name: str,
+        credential: "AsyncTokenCredential",
+        **kwargs: Any,
+    ) -> None:
+        # TODO: Validate input formats with regex match (e.g. subscription ID)
+        if not endpoint:
+            raise ValueError("endpoint is required")
+        if not subscription_id:
+            raise ValueError("subscription_id is required")
+        if not resource_group_name:
+            raise ValueError("resource_group_name is required")
+        if not workspace_name:
+            raise ValueError("workspace_name is required")
+        if not credential:
+            raise ValueError("Credential is required")
+        if "api_version" in kwargs:
+            raise ValueError("No support for overriding the API version")
+        if "credential_scopes" in kwargs:
+            raise ValueError("No support for overriding the credential scopes")
+
+        kwargs1 = kwargs.copy()
+        kwargs2 = kwargs.copy()
+        kwargs3 = kwargs.copy()
+
+        # For Endpoints operations (enumerating connections, getting SAS tokens)
+        _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}"  # pylint: disable=line-too-long
+        self._config1 = AzureAIClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            workspace_name=workspace_name,
+            credential=credential,
+            api_version="2024-07-01-preview",
+            credential_scopes=["https://management.azure.com"],
+            **kwargs1,
+        )
+        _policies1 = kwargs1.pop("policies", None)
+        if _policies1 is None:
+            _policies1 = [
+                policies.RequestIdPolicy(**kwargs1),
+                self._config1.headers_policy,
+                self._config1.user_agent_policy,
+                self._config1.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs1),
+                self._config1.redirect_policy,
+                self._config1.retry_policy,
+                self._config1.authentication_policy,
+                self._config1.custom_hook_policy,
+                self._config1.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs1),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None,
+                self._config1.http_logging_policy,
+            ]
+        self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1)
+
+        # For Agents operations
+        _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}"  # pylint: disable=line-too-long
+        self._config2 = AzureAIClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            workspace_name=workspace_name,
+            credential=credential,
+            api_version="2024-07-01-preview",  # TODO: Update me
+            credential_scopes=["https://ml.azure.com"],
+            **kwargs2,
+        )
+        _policies2 = kwargs2.pop("policies", None)
+        if _policies2 is None:
+            _policies2 = [
+                policies.RequestIdPolicy(**kwargs2),
+                self._config2.headers_policy,
+                self._config2.user_agent_policy,
+                self._config2.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs2),
+                self._config2.redirect_policy,
+                self._config2.retry_policy,
+                self._config2.authentication_policy,
+                self._config2.custom_hook_policy,
+                self._config2.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs2),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None,
+                self._config2.http_logging_policy,
+            ]
+        self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2)
+
+        # For Cloud Evaluations operations
+        _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}"  # pylint: disable=line-too-long
+        self._config3 = AzureAIClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            workspace_name=workspace_name,
+            credential=credential,
+            api_version="2024-07-01-preview",  # TODO: Update me
+            credential_scopes=["https://management.azure.com"],  # TODO: Update once service changes are ready
+            **kwargs3,
+        )
+        _policies3 = kwargs3.pop("policies", None)
+        if _policies3 is None:
+            _policies3 = [
+                policies.RequestIdPolicy(**kwargs3),
+                self._config3.headers_policy,
+                self._config3.user_agent_policy,
+                self._config3.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs3),
+                self._config3.redirect_policy,
+                self._config3.retry_policy,
+                self._config3.authentication_policy,
+                self._config3.custom_hook_policy,
+                self._config3.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs3),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None,
+                self._config3.http_logging_policy,
+            ]
+        self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3)
+
+        self._serialize = Serializer()
+        self._deserialize = Deserializer()
+        self._serialize.client_side_validation = False
+
+        self.endpoints = EndpointsOperations(self._client1, self._config1, self._serialize, self._deserialize)
+        self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize)
+        self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize)
+        self.inference = InferenceOperations(self)
+
+    async def close(self) -> None:
+        await self._client1.close()
+        await self._client2.close()
+        await self._client3.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client1.__aenter__()
+        await self._client2.__aenter__()
+        await self._client3.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client1.__aexit__(*exc_details)
+        await self._client2.__aexit__(*exc_details)
+        await self._client3.__aexit__(*exc_details)
+
+    @classmethod
+    def from_connection_string(cls, connection: str, credential: "AsyncTokenCredential", **kwargs) -> "AzureAIClient":
+        """
+        Create an asynchronous AzureAIClient from a connection string.
+
+        :param connection: The connection string, copied from your AI Studio project.
+ """ + if not connection: + raise ValueError("Connection string is required") + parts = connection.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + workspace_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, workspace_name, credential, **kwargs) + +__all__: List[str] = [ + "AzureAIClient", +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index f7dd32510333..c424c6d97b44 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -6,9 +6,216 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +import logging +from typing import List, AsyncIterable +from ._operations import EndpointsOperations as EndpointsOperationsGenerated +from ...models._patch import EndpointProperties +from ...models._enums import AuthenticationType, EndpointType +from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +logger = logging.getLogger(__name__) + + +class InferenceOperations: + + def __init__(self, outer_instance): + self.outer_instance = outer_instance + + async def get_chat_completions_client(self) -> "ChatCompletionsClient": + endpoint = await self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + ) + if not endpoint: + raise ValueError("No serverless endpoint found") + + try: + from azure.ai.inference.aio import ChatCompletionsClient + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" + ) + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" + ) + from azure.core.credentials import AzureKeyCredential + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" + ) + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. 
+ logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" + ) + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + else: + raise ValueError("Unknown authentication type") + + return client + + async def get_embeddings_client(self) -> "EmbeddingsClient": + endpoint = await self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + ) + if not endpoint: + raise ValueError("No serverless endpoint found") + + try: + from azure.ai.inference.aio import EmbeddingsClient + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" + ) + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" + ) + from azure.core.credentials import AzureKeyCredential + + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" + ) + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" + ) + client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + else: + raise ValueError("Unknown authentication type") + + return client + + async def get_azure_openai_client(self) -> "AzureOpenAI": + endpoint = await self.outer_instance.endpoints.get_default( + endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True + ) + if not endpoint: + raise ValueError("No Azure OpenAI endpoint found.") + + try: + from openai_async import AzureOpenAI + except ModuleNotFoundError as _: + raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai-async'") + + if endpoint.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" + ) + client = AzureOpenAI( + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", # TODO: Is this needed? + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" + ) + try: + from azure.identity import get_bearer_token_provider + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "azure.identity package not installed. 
Please install it using 'pip install azure.identity'" + ) + client = AzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") + client = AzureOpenAI( + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + else: + raise ValueError("Unknown authentication type") + + return client + + +class EndpointsOperations(EndpointsOperationsGenerated): + + async def get_default(self, *, endpoint_type: EndpointType, populate_secrets: bool = False) -> EndpointProperties: + if not endpoint_type: + raise ValueError("You must specify an endpoint type") + # Since there is no notion of service default at the moment, always return the first one + async for endpoint_properties in self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets): + return endpoint_properties + return None + + async def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> EndpointProperties: + if not endpoint_name: + raise ValueError("Endpoint name cannot be empty") + if populate_secrets: + connection: ConnectionsListSecretsResponse = await self._list_secrets( + connection_name_in_url=endpoint_name, + connection_name=endpoint_name, + subscription_id=self._config.subscription_id, + resource_group_name=self._config.resource_group_name, + workspace_name=self._config.workspace_name, + api_version_in_body=self._config.api_version, + ) + if connection.properties.auth_type == AuthenticationType.AAD: + return EndpointProperties(connection=connection, token_credential=self._config.credential) + elif connection.properties.auth_type == AuthenticationType.SAS: + from ...models._patch import SASTokenCredential + token_credential = SASTokenCredential( + sas_token=connection.properties.credentials.sas, + credential=self._config.credential, + subscription_id=self._config.subscription_id, + resource_group_name=self._config.resource_group_name, + workspace_name=self._config.workspace_name, + connection_name=endpoint_name, + ) + return EndpointProperties(connection=connection, token_credential=token_credential) + + return EndpointProperties(connection=connection) + else: + internal_response: ConnectionsListResponse = await self._list() + for connection in internal_response.value: + if endpoint_name == connection.name: + return EndpointProperties(connection=connection) + return None + + async def list( + self, *, endpoint_type: EndpointType | None = None, populate_secrets: bool = False + ) -> AsyncIterable[EndpointProperties]: + + # First make a REST call to /list to get all the connections, without secrets + connections_list: ConnectionsListResponse = await self._list() + + # Filter by connection type + for connection in connections_list.value: + if endpoint_type is None or connection.properties.category == endpoint_type: + if not populate_secrets: + yield EndpointProperties(connection=connection) + else: + yield await self.get(endpoint_name=connection.name, populate_secrets=True) + + 
+__all__: List[str] = [
+    "EndpointsOperations",
+    "InferenceOperations",
+]  # Add all objects you want publicly available to users at this package level
 
 
 def patch_sdk():
@@ -18,3 +225,4 @@ def patch_sdk():
     you can't accomplish using the techniques described in
     https://aka.ms/azsdk/python/dpcodegen/python/customize
     """
+
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
index 38383ad48d89..99056fb76a51 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
@@ -188,7 +188,7 @@ def get_default(self, *, endpoint_type: EndpointType, populate_secrets: bool = F
         else:
             return None
 
-    def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> ConnectionsListSecretsResponse:
+    def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> EndpointProperties:
         if not endpoint_name:
             raise ValueError("Endpoint name cannot be empty")
         if populate_secrets:
diff --git a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py
new file mode 100644
index 000000000000..2ee646d8eb7e
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py
@@ -0,0 +1,141 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_endpoints_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to enumerate and get endpoints from an AzureAIClient.
+
+USAGE:
+    python sample_endpoints_async.py
+
+    Before running the sample:
+
+    pip install azure.ai.client aiohttp azure-identity
+
+    Set the environment variables with your own values:
+    1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import asyncio
+import os
+from azure.ai.client.aio import AzureAIClient
+from azure.ai.client.models import EndpointType, AuthenticationType
+from openai import AzureOpenAI
+from azure.ai.inference.aio import ChatCompletionsClient
+from azure.ai.inference.models import UserMessage
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from azure.core.credentials import AzureKeyCredential
+
+async def sample_endpoints_async():
+
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # It should be in the format "<endpoint>;<subscription_id>;<resource_group_name>;<workspace_name>"
+    async with AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
+    ) as ai_client:
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    # async with AzureAIClient(
+    #     credential=DefaultAzureCredential(),
+    #     endpoint=os.environ["AI_CLIENT_ENDPOINT"],
+    #     subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    #     resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    #     workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    # ) as ai_client:
+
+        # List all endpoints of a particular "type", with or without their credentials:
+        print("====> Listing of all Azure Open AI endpoints:")
+        async for endpoint in ai_client.endpoints.list(
+            endpoint_type=EndpointType.AZURE_OPEN_AI,  # Optional. Defaults to all types.
+            populate_secrets=True,  # Optional.
Defaults to "False" + ): + print(endpoint) + + # Get the default endpoint of a particular "type" (note that since at the moment the service + # does not have a notion of a default endpoint, this will return the first endpoint of that type): + endpoint = await ai_client.endpoints.get_default( + endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" + ) + print("====> Get default Azure Open AI endpoint:") + print(endpoint) + + # Get an endpoint by its name: + endpoint = await ai_client.endpoints.get( + endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. + ) + print("====> Get endpoint by name:") + print(endpoint) + + exit() + + # Here is how you would create the appropriate AOAI or Inference SDK for these endpoint + if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: + + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating AzureOpenAI client using API key authentication") + client = AzureOpenAI( + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", # TODO: Is this needed? + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + print("====> Creating AzureOpenAI client using Entra ID authentication") + client = AzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + print("====> Creating AzureOpenAI client using SAS authentication") + client = AzureOpenAI( + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-08-01-preview", + ) + + response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) + + elif endpoint.endpoint_type == EndpointType.SERVERLESS: + + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating ChatCompletionsClient using API key authentication") + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + print("====> Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) + elif endpoint.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. 
+ print("====> Creating ChatCompletionsClient using SAS authentication") + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + + print(response.choices[0].message.content) + +async def main(): + await sample_endpoints_async() + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index de2fabd8d81b..e19cc7191d84 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -1,3 +1,8 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + import os from azure.ai.client import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType @@ -12,7 +17,6 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), connection=os.environ["AI_CLIENT_CONNECTION_STRING"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) # Or, you can create the Azure AI Client by giving all required parameters directly @@ -22,7 +26,6 @@ # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], -# logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging # ) # List all endpoints of a particular "type", with or without their credentials: diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py new file mode 100644 index 000000000000..9406ed579964 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py @@ -0,0 +1,58 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_get_chat_completions_client_async.py + +DESCRIPTION: + This sample demonstrates how to get an authenticated ChatCompletionsClient + from the azure.ai.inference package, from an AzureAIClient. + +USAGE: + python sample_get_chat_completions_client_async.py + + Before running the sample: + + pip install azure.ai.client aiohttp azure-identity + + Set the environment variables with your own values: + 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" +import os +import asyncio +from azure.ai.client.aio import AzureAIClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + +async def sample_get_chat_completions_client_async(): + + # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+    # It should have the format "<endpoint>;<subscription_id>;<resource_group_name>;<workspace_name>"
+    async with AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
+    ) as ai_client:
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    # async with AzureAIClient(
+    #     credential=DefaultAzureCredential(),
+    #     endpoint=os.environ["AI_CLIENT_ENDPOINT"],
+    #     subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    #     resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    #     workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    # ) as ai_client:
+
+        # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection:
+        async with await ai_client.inference.get_chat_completions_client() as client:
+
+            response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+            print(response.choices[0].message.content)
+
+
+async def main():
+    await sample_get_chat_completions_client_async()
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
index 933df0fb3c36..82ffeecdd8ef 100644
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
+++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
@@ -1,3 +1,7 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
 import os
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
index c6238e59b5cf..07613f1ff1a8 100644
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
+++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
@@ -1,3 +1,7 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
 import os
 from azure.ai.client import AzureAIClient
 from azure.ai.inference.models import UserMessage
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
index 7adcef7db8c0..e0b6bf7d6e81 100644
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
+++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
@@ -1,3 +1,7 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ import os from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential From 8935a4d2c2b94d1886065a7b73985ddb946c46d1 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Wed, 9 Oct 2024 15:44:18 -0700 Subject: [PATCH 020/138] Users/singankit/evaluation add data mapping (#37824) * Update sample_evaluations.py * Update sample_evaluations.py --- .../azure-ai-client/samples/evaluations/sample_evaluations.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py index d9199dc6c379..cf260c12bcc0 100644 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py @@ -40,6 +40,9 @@ "azure_endpoint": "https://ai-anksingai0771286510468288.openai.azure.com/", } }, + # data_mapping= { + # "response": "${data.answer}", "query": "${data.question}" + # } ), }, # This is needed as a workaround until environment gets published to registry From c4d3540621b9e68d2e498b67f2e135c66d0c7c2c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:21:32 -0700 Subject: [PATCH 021/138] Fix .inference.get_azure_openai_client() for the async AzureAIClient (#37828) --- .../azure/ai/client/aio/operations/_patch.py | 10 +- .../agents_cancel_run_maximum_set_gen.py | 45 -- ...vector_store_file_batch_minimum_set_gen.py | 45 -- .../agents_delete_agent_minimum_set_gen.py | 44 -- .../agents_delete_file_minimum_set_gen.py | 44 -- .../agents_delete_thread_minimum_set_gen.py | 44 -- ...elete_vector_store_file_minimum_set_gen.py | 45 -- ...nts_delete_vector_store_minimum_set_gen.py | 44 -- .../agents_get_agent_minimum_set_gen.py | 44 -- ...agents_get_file_content_minimum_set_gen.py | 44 -- .../agents_get_file_minimum_set_gen.py | 44 -- .../agents_get_message_minimum_set_gen.py | 45 -- .../agents_get_run_maximum_set_gen.py | 45 -- .../agents_get_run_step_minimum_set_gen.py | 46 -- .../agents_get_thread_minimum_set_gen.py | 44 -- ...vector_store_file_batch_minimum_set_gen.py | 45 -- ...s_get_vector_store_file_minimum_set_gen.py | 45 -- ...agents_get_vector_store_minimum_set_gen.py | 44 -- .../agents_list_agents_minimum_set_gen.py | 42 -- .../agents_list_files_minimum_set_gen.py | 42 -- .../agents_list_messages_minimum_set_gen.py | 44 -- .../agents_list_run_steps_minimum_set_gen.py | 45 -- .../agents_list_runs_maximum_set_gen.py | 44 -- ..._store_file_batch_files_minimum_set_gen.py | 45 -- ...list_vector_store_files_minimum_set_gen.py | 44 -- ...ents_list_vector_stores_minimum_set_gen.py | 42 -- .../agents_update_run_maximum_set_gen.py | 46 -- .../evaluations_create_maximum_set_gen.py | 55 -- .../evaluations_create_minimum_set_gen.py | 47 -- .../evaluations_get_maximum_set_gen.py | 44 -- .../evaluations_get_minimum_set_gen.py | 44 -- .../evaluations_list_maximum_set_gen.py | 43 -- .../evaluations_list_minimum_set_gen.py | 43 -- .../evaluations_update_maximum_set_gen.py | 49 -- .../generated_tests/conftest.py | 35 - .../generated_tests/test_agents_operations.py | 606 ----------------- .../test_agents_operations_async.py | 607 ------------------ .../test_azure_ai_agents_operations.py | 606 ----------------- .../test_azure_ai_agents_operations_async.py | 607 ------------------ .../test_azure_ai_evaluations_operations.py | 146 ----- 
...t_azure_ai_evaluations_operations_async.py | 147 ----- .../test_endpoints_operations.py | 45 -- .../test_endpoints_operations_async.py | 46 -- .../test_evaluations_operations.py | 71 -- .../test_evaluations_operations_async.py | 72 --- .../generated_tests/testpreparer.py | 24 - .../generated_tests/testpreparer_async.py | 20 - .../sample_get_azure_openai_client_async.py | 65 ++ ...ample_get_chat_completions_client_async.py | 8 +- .../sample_get_embeddings_client_async.py | 63 ++ .../sample_get_azure_openai_client.py | 18 + .../sample_get_chat_completions_client.py | 18 + .../inference/sample_get_embeddings_client.py | 18 + 53 files changed, 191 insertions(+), 4517 deletions(-) delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py delete 
 delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/conftest.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/testpreparer.py
 delete mode 100644 sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py
 create mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py
 create mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py

diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
index c424c6d97b44..950d65855134 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
@@ -98,7 +98,7 @@ async def get_embeddings_client(self) -> "EmbeddingsClient":
 
         return client
 
-    async def get_azure_openai_client(self) -> "AzureOpenAI":
+    async def get_azure_openai_client(self) -> "AsyncAzureOpenAI":
         endpoint = await self.outer_instance.endpoints.get_default(
             endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True
         )
@@ -106,7 +106,7 @@ async def get_azure_openai_client(self) -> "AzureOpenAI":
             raise ValueError("No Azure OpenAI endpoint found.")
 
         try:
-            from openai_async import AzureOpenAI
+            from openai import AsyncAzureOpenAI
         except ModuleNotFoundError as _:
             raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai-async'")
 
@@ -114,7 +114,7 @@ async def get_azure_openai_client(self) -> "AzureOpenAI":
             logger.debug(
                 "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
             )
-            client = AzureOpenAI(
+            client = AsyncAzureOpenAI(
                 api_key=endpoint.key,
                 azure_endpoint=endpoint.endpoint_url,
                 api_version="2024-08-01-preview",  # TODO: Is this needed?
@@ -129,7 +129,7 @@ async def get_azure_openai_client(self) -> "AzureOpenAI":
                 raise ModuleNotFoundError(
                     "azure.identity package not installed. Please install it using 'pip install azure.identity'"
                 )
-            client = AzureOpenAI(
+            client = AsyncAzureOpenAI(
                 # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
                 azure_ad_token_provider=get_bearer_token_provider(
                     endpoint.token_credential, "https://cognitiveservices.azure.com/.default"
@@ -139,7 +139,7 @@ async def get_azure_openai_client(self) -> "AzureOpenAI":
             )
         elif endpoint.authentication_type == AuthenticationType.SAS:
             logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
-            client = AzureOpenAI(
+            client = AsyncAzureOpenAI(
                 azure_ad_token_provider=get_bearer_token_provider(
                     endpoint.token_credential, "https://cognitiveservices.azure.com/.default"
                 ),
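With the hunks above, the async inference path now returns openai.AsyncAzureOpenAI instead of the synchronous AzureOpenAI, so its methods must be awaited. A minimal usage sketch, assuming the async AzureAIClient from azure.ai.client.aio and an async credential; the "gpt-4o" deployment name is illustrative, not taken from this patch:

import asyncio

from azure.ai.client.aio import AzureAIClient
from azure.identity.aio import DefaultAzureCredential


async def main():
    # Same constructor shape as the samples in this package.
    client = AzureAIClient(
        endpoint="ENDPOINT",
        subscription_id="SUBSCRIPTION_ID",
        resource_group_name="RESOURCE_GROUP_NAME",
        workspace_name="WORKSPACE_NAME",
        credential=DefaultAzureCredential(),
    )

    # After the fix this is an openai.AsyncAzureOpenAI, so the call is awaited.
    aoai_client = await client.inference.get_azure_openai_client()
    response = await aoai_client.chat.completions.create(
        model="gpt-4o",  # assumed Azure OpenAI deployment name
        messages=[{"role": "user", "content": "How many feet are in a mile?"}],
    )
    print(response.choices[0].message.content)


if __name__ == "__main__":
    asyncio.run(main())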
diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py
deleted file mode 100644
index 0fc09fadab61..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_run_maximum_set_gen.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python agents_cancel_run_maximum_set_gen.py
-
-    Before run the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.agents.cancel_run(
-        thread_id="sxximmrzssszbrz",
-        run_id="reoghmjcd",
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Agents_CancelRun_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py
deleted file mode 100644
index 3108e9a4632c..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/agents_cancel_vector_store_file_batch_minimum_set_gen.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_cancel_vector_store_file_batch_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.cancel_vector_store_file_batch( - vector_store_id="esqgxemsdquv", - batch_id="ukjhmcvwhahdrhhiiyortbbjlhx", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_CancelVectorStoreFileBatch_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py deleted file mode 100644 index 5eedea350f06..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_delete_agent_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_delete_agent_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.delete_agent( - assistant_id="gjbkohvdtcvgmgubguj", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_DeleteAgent_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py deleted file mode 100644 index d027aed6ae44..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_delete_file_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_delete_file_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.delete_file( - file_id="flyag", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_DeleteFile_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py deleted file mode 100644 index 405b18477a63..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_delete_thread_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_delete_thread_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.delete_thread( - thread_id="zwmmodqpcvxghsghkjw", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_DeleteThread_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py deleted file mode 100644 index 92f956c6ff5f..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_file_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_delete_vector_store_file_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.delete_vector_store_file( - vector_store_id="hbnrqrcvbsowbuksdrtcqi", - file_id="vzshxliuznhftv", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_DeleteVectorStoreFile_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py deleted file mode 100644 index 11417b53fa64..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_delete_vector_store_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_delete_vector_store_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.delete_vector_store( - vector_store_id="sshdziwghotwucptzhzgdxpggsedy", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_DeleteVectorStore_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py deleted file mode 100644 index 5163c2d9357b..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_agent_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_agent_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_agent( - assistant_id="aorarcltzoneckqmfoluejbhgbm", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetAgent_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py deleted file mode 100644 index c32dfdd03333..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_content_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_file_content_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_file_content( - file_id="oudalhdmazgj", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetFileContent_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py deleted file mode 100644 index 4f657dffaa2b..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_file_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_file_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_file( - file_id="ahlwbnjjg", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetFile_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py deleted file mode 100644 index 7f014bca5159..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_message_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_message_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_message( - thread_id="secglofbhaocemzzskoeatl", - message_id="axpvtnmnjmpctkmnoswam", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetMessage_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py deleted file mode 100644 index c939bf8d423b..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_maximum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_run_maximum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_run( - thread_id="ilcsibdqbvldqyvmbjjtvirntap", - run_id="scpdacucqpuoqjihekkeolfpdyeea", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetRun_MaximumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py deleted file mode 100644 index 747950b6e57f..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_run_step_minimum_set_gen.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_run_step_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_run_step( - thread_id="jwopmhvryvcpltxhimyrvkcwel", - run_id="echizvrhsigfce", - step_id="kc", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetRunStep_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py deleted file mode 100644 index e549f5ee4f80..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_thread_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_thread_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_thread( - thread_id="tyfreqamibskuzfoyo", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetThread_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py deleted file mode 100644 index d1084d004d10..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_batch_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_vector_store_file_batch_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_vector_store_file_batch( - vector_store_id="lbs", - batch_id="zxqbzpge", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStoreFileBatch_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py deleted file mode 100644 index ed62cc06a203..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_file_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_vector_store_file_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_vector_store_file( - vector_store_id="tcpppckhbcqnmxcpqnsdhmocbfkvw", - file_id="swmxwhxjbntnayymmrrocysdsrio", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStoreFile_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py deleted file mode 100644 index 6fa3b60a5e04..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_get_vector_store_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_get_vector_store_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.get_vector_store( - vector_store_id="imldyfrenvwrdahxomrvypghklgzfm", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_GetVectorStore_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py deleted file mode 100644 index 88cf1458e1ce..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_agents_minimum_set_gen.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_agents_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_agents() - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListAgents_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py deleted file mode 100644 index 81f7cd719fe5..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_files_minimum_set_gen.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_files_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_files() - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListFiles_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py deleted file mode 100644 index 39c7f858737f..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_messages_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_messages_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_messages( - thread_id="bpo", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListMessages_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py deleted file mode 100644 index 783eed5ea9ce..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_run_steps_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_run_steps_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_run_steps( - thread_id="rgpbsplbzxqewjirwdhbcvc", - run_id="jjtdybnzsckqsid", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListRunSteps_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py deleted file mode 100644 index 4827529a038e..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_runs_maximum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_runs_maximum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. 
For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_runs( - thread_id="mkbrj", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListRuns_MaximumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py deleted file mode 100644 index 1b54f84a9f2d..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_file_batch_files_minimum_set_gen.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_vector_store_file_batch_files_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_vector_store_file_batch_files( - vector_store_id="vdelhyyzsgiavifhhzvtvqeqhhsuh", - batch_id="gffqfjubkoliaarvbyq", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStoreFileBatchFiles_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py deleted file mode 100644 index 405fc7ae13d9..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_store_files_minimum_set_gen.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_vector_store_files_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_vector_store_files( - vector_store_id="al", - ) - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStoreFiles_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py deleted file mode 100644 index b731ec0b53d4..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_list_vector_stores_minimum_set_gen.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.identity import DefaultAzureCredential - -from azure.ai.client import AzureAIClient - -""" -# PREREQUISITES - pip install azure-identity - pip install azure-ai-client -# USAGE - python agents_list_vector_stores_minimum_set_gen.py - - Before run the sample, please set the values of the client ID, tenant ID and client secret - of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID, - AZURE_CLIENT_SECRET. For more info about how to get the value, please see: - https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal -""" - - -def main(): - client = AzureAIClient( - endpoint="ENDPOINT", - subscription_id="SUBSCRIPTION_ID", - resource_group_name="RESOURCE_GROUP_NAME", - workspace_name="WORKSPACE_NAME", - credential=DefaultAzureCredential(), - ) - - response = client.agents.list_vector_stores() - print(response) - - -# x-ms-original-file: 2024-07-01-preview/Agents_ListVectorStores_MinimumSet_Gen.json -if __name__ == "__main__": - main() diff --git a/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py deleted file mode 100644 index a04f7a260322..000000000000 --- a/sdk/ai/azure-ai-client/generated_samples/agents_update_run_maximum_set_gen.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python agents_update_run_maximum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.agents.update_run(
-        thread_id="rtzvfjw",
-        run_id="ibopwyspzczc",
-        body={"metadata": {"key5931": "ojeukdviplvt"}},
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Agents_UpdateRun_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py
deleted file mode 100644
index 86f5b25179f9..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_maximum_set_gen.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_create_maximum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.create(
-        evaluation={
-            "data": {"Uri": "bzelztdbitxoxsrqlthlnacjssata", "type": "Evaluations.InputData"},
-            "description": "dfydblbzvilyvhdtqo",
-            "displayName": "lresoznoqbpwvsummatfyc",
-            "evaluators": {
-                "key4462": {"dataMapping": {"key6077": "rkegahkqoagtfoxuxizgo"}, "id": "bodhxfmbqquu", "initParams": {}}
-            },
-            "id": "qfkteoypwndeirixziosai",
-            "properties": {"key6705": "eooztvyhcibkecr"},
-            "systemData": {},
-            "tags": {"key9950": "umaulvau"},
-        },
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_Create_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py
deleted file mode 100644
index b74d321cf74d..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_create_minimum_set_gen.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_create_minimum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.create(
-        evaluation={
-            "data": {"Uri": "bzelztdbitxoxsrqlthlnacjssata", "type": "Evaluations.InputData"},
-            "evaluators": {},
-        },
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_Create_MinimumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py
deleted file mode 100644
index a20226495dcf..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_maximum_set_gen.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_get_maximum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.get(
-        id="93-",
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_Get_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py
deleted file mode 100644
index 33bd9bc4aede..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_get_minimum_set_gen.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_get_minimum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.get(
-        id="0_0",
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_Get_MinimumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py
deleted file mode 100644
index e472a0399432..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_maximum_set_gen.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_list_maximum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.list()
-    for item in response:
-        print(item)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_List_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py
deleted file mode 100644
index 9c4f643f6f9c..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_list_minimum_set_gen.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_list_minimum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.list()
-    for item in response:
-        print(item)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_List_MinimumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py b/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py
deleted file mode 100644
index dcf90380de75..000000000000
--- a/sdk/ai/azure-ai-client/generated_samples/evaluations_update_maximum_set_gen.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from azure.identity import DefaultAzureCredential
-
-from azure.ai.client import AzureAIClient
-
-"""
-# PREREQUISITES
-    pip install azure-identity
-    pip install azure-ai-client
-# USAGE
-    python evaluations_update_maximum_set_gen.py
-
-    Before running the sample, please set the values of the client ID, tenant ID and client secret
-    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
-    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
-    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
-"""
-
-
-def main():
-    client = AzureAIClient(
-        endpoint="ENDPOINT",
-        subscription_id="SUBSCRIPTION_ID",
-        resource_group_name="RESOURCE_GROUP_NAME",
-        workspace_name="WORKSPACE_NAME",
-        credential=DefaultAzureCredential(),
-    )
-
-    response = client.evaluations.update(
-        id="8y",
-        update_request={
-            "description": "vl",
-            "displayName": "zkystmqhvncvxnxrhahhulbui",
-            "tags": {"key6951": "mirtkcesgent"},
-        },
-    )
-    print(response)
-
-
-# x-ms-original-file: 2024-07-01-preview/Evaluations_Update_MaximumSet_Gen.json
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/generated_tests/conftest.py b/sdk/ai/azure-ai-client/generated_tests/conftest.py
deleted file mode 100644
index 72c378f12508..000000000000
--- a/sdk/ai/azure-ai-client/generated_tests/conftest.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-import os
-import pytest
-from dotenv import load_dotenv
-from devtools_testutils import (
-    test_proxy,
-    add_general_regex_sanitizer,
-    add_body_key_sanitizer,
-    add_header_regex_sanitizer,
-)
-
-load_dotenv()
-
-
-# For security, please avoid recording sensitive identity information in recordings
-@pytest.fixture(scope="session", autouse=True)
-def add_sanitizers(test_proxy):
-    azureai_subscription_id = os.environ.get("AZUREAI_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000")
-    azureai_tenant_id = os.environ.get("AZUREAI_TENANT_ID", "00000000-0000-0000-0000-000000000000")
-    azureai_client_id = os.environ.get("AZUREAI_CLIENT_ID", "00000000-0000-0000-0000-000000000000")
-    azureai_client_secret = os.environ.get("AZUREAI_CLIENT_SECRET", "00000000-0000-0000-0000-000000000000")
-    add_general_regex_sanitizer(regex=azureai_subscription_id, value="00000000-0000-0000-0000-000000000000")
-    add_general_regex_sanitizer(regex=azureai_tenant_id, value="00000000-0000-0000-0000-000000000000")
-    add_general_regex_sanitizer(regex=azureai_client_id, value="00000000-0000-0000-0000-000000000000")
-    add_general_regex_sanitizer(regex=azureai_client_secret, value="00000000-0000-0000-0000-000000000000")
-
-    add_header_regex_sanitizer(key="Set-Cookie", value="[set-cookie;]")
-    add_header_regex_sanitizer(key="Cookie", value="cookie;")
-    add_body_key_sanitizer(json_path="$..access_token", value="access_token")
diff --git a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py
deleted file mode 100644
index cc587a49dc53..000000000000
--- a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations.py
+++ /dev/null
@@ -1,606 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-import pytest
-from devtools_testutils import recorded_by_proxy
-from testpreparer import ClientTestBase, Preparer
-
-
-@pytest.mark.skip("you may need to update the auto-generated test case before run it")
-class TestAgentsOperations(ClientTestBase):
-    @Preparer()
-    @recorded_by_proxy
-    def test_agents_create_agent(self, _endpoint):
-        client = self.create_client(endpoint=_endpoint)
-        response = client.agents.create_agent(
-            body={
-                "model": "str",
-                "description": "str",
-                "instructions": "str",
-                "metadata": {"str": "str"},
-                "name": "str",
-                "response_format": "str",
-                "temperature": 0.0,
-                "tool_resources": {
-                    "code_interpreter": {"file_ids": ["str"]},
-                    "file_search": {"vector_store_ids": ["str"]},
-                },
-                "tools": ["tool_definition"],
-                "top_p": 0.0,
-            },
-            model="str",
-        )
-
-        # please add some check logic here by yourself
-        # ...
- - @Preparer() - @recorded_by_proxy - def test_agents_list_agents(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_agents() - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_agent(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_agent( - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_update_agent(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.update_agent( - agent_id="str", - body={ - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "model": "str", - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_delete_agent(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.delete_agent( - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_thread(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_thread( - body={ - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_thread(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_update_thread(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.update_thread( - thread_id="str", - body={ - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_delete_thread(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.delete_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_message(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_message( - thread_id="str", - body={ - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - }, - role="str", - content="str", - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy - def test_agents_list_messages(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_messages( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_message(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_message( - thread_id="str", - message_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_update_message(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.update_message( - thread_id="str", - message_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_run( - thread_id="str", - body={ - "agent_id": "str", - "additional_instructions": "str", - "additional_messages": [ - { - "agent_id": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "completed_at": "2020-02-20 00:00:00", - "content": ["message_content"], - "created_at": "2020-02-20 00:00:00", - "id": "str", - "incomplete_at": "2020-02-20 00:00:00", - "incomplete_details": {"reason": "str"}, - "metadata": {"str": "str"}, - "object": "thread.message", - "role": "str", - "run_id": "str", - "status": "str", - "thread_id": "str", - } - ], - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "tool_choice": "str", - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_runs(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_runs( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_update_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.update_run( - thread_id="str", - run_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_submit_tool_outputs_to_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.submit_tool_outputs_to_run( - thread_id="str", - run_id="str", - body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, - tool_outputs=[{"output": "str", "tool_call_id": "str"}], - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy - def test_agents_cancel_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.cancel_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_thread_and_run(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_thread_and_run( - body={ - "agent_id": "str", - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "thread": { - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - "tool_choice": "str", - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_run_step(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_run_step( - thread_id="str", - run_id="str", - step_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_run_steps(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_run_steps( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_files(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_files() - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_upload_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.upload_file( - body={"file": "filetype", "purpose": "str", "filename": "str"}, - file="filetype", - purpose="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_delete_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.delete_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_file_content(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_file_content( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_vector_stores(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_vector_stores() - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy - def test_agents_create_vector_store(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_vector_store( - body={ - "chunking_strategy": "vector_store_chunking_strategy_request", - "expires_after": {"anchor": "str", "days": 0}, - "file_ids": ["str"], - "metadata": {"str": "str"}, - "name": "str", - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_vector_store(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_modify_vector_store(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.modify_vector_store( - vector_store_id="str", - body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_delete_vector_store(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.delete_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_vector_store_files(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_vector_store_files( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_vector_store_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_vector_store_file( - vector_store_id="str", - body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_vector_store_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_delete_vector_store_file(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.delete_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_create_vector_store_file_batch(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.create_vector_store_file_batch( - vector_store_id="str", - body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_ids=["str"], - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_get_vector_store_file_batch(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.get_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy - def test_agents_cancel_vector_store_file_batch(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.cancel_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_agents_list_vector_store_file_batch_files(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.agents.list_vector_store_file_batch_files( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py deleted file mode 100644 index 56536356a3ae..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_agents_operations_async.py +++ /dev/null @@ -1,607 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import Preparer -from testpreparer_async import ClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAgentsOperationsAsync(ClientTestBaseAsync): - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_agent(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_agent( - body={ - "model": "str", - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - model="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_agents(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_agents() - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_agent(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_agent( - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_update_agent(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.update_agent( - agent_id="str", - body={ - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "model": "str", - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_agents_delete_agent(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.delete_agent( - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_thread(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_thread( - body={ - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_thread(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_update_thread(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.update_thread( - thread_id="str", - body={ - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_delete_thread(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.delete_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_message(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_message( - thread_id="str", - body={ - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - }, - role="str", - content="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_messages(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_messages( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_message(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_message( - thread_id="str", - message_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_update_message(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.update_message( - thread_id="str", - message_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_run( - thread_id="str", - body={ - "agent_id": "str", - "additional_instructions": "str", - "additional_messages": [ - { - "agent_id": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "completed_at": "2020-02-20 00:00:00", - "content": ["message_content"], - "created_at": "2020-02-20 00:00:00", - "id": "str", - "incomplete_at": "2020-02-20 00:00:00", - "incomplete_details": {"reason": "str"}, - "metadata": {"str": "str"}, - "object": "thread.message", - "role": "str", - "run_id": "str", - "status": "str", - "thread_id": "str", - } - ], - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "tool_choice": "str", - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_runs(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_runs( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_update_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.update_run( - thread_id="str", - run_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_submit_tool_outputs_to_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.submit_tool_outputs_to_run( - thread_id="str", - run_id="str", - body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, - tool_outputs=[{"output": "str", "tool_call_id": "str"}], - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_cancel_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.cancel_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_thread_and_run(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_thread_and_run( - body={ - "agent_id": "str", - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "thread": { - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - "tool_choice": "str", - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - agent_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_run_step(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_run_step( - thread_id="str", - run_id="str", - step_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_run_steps(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_run_steps( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_files(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_files() - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_upload_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.upload_file( - body={"file": "filetype", "purpose": "str", "filename": "str"}, - file="filetype", - purpose="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_delete_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.delete_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_file_content(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_file_content( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_vector_stores(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_vector_stores() - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_vector_store( - body={ - "chunking_strategy": "vector_store_chunking_strategy_request", - "expires_after": {"anchor": "str", "days": 0}, - "file_ids": ["str"], - "metadata": {"str": "str"}, - "name": "str", - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_modify_vector_store(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.modify_vector_store( - vector_store_id="str", - body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_delete_vector_store(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.delete_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_vector_store_files(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_vector_store_files( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_vector_store_file( - vector_store_id="str", - body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_delete_vector_store_file(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.delete_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store_file_batch(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.create_vector_store_file_batch( - vector_store_id="str", - body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_ids=["str"], - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store_file_batch(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.get_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_cancel_vector_store_file_batch(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.cancel_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_agents_list_vector_store_file_batch_files(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.agents.list_vector_store_file_batch_files( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py deleted file mode 100644 index 2ef135f90409..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations.py +++ /dev/null @@ -1,606 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AzureAIClientTestBase, AzureAIPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAzureAIAgentsOperations(AzureAIClientTestBase): - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_agent(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_agent( - body={ - "model": "str", - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - model="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_agents(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_agents() - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_agent(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_agent( - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_update_agent(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.update_agent( - assistant_id="str", - body={ - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "model": "str", - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_delete_agent(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.delete_agent( - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_thread(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_thread( - body={ - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_thread(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_update_thread(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.update_thread( - thread_id="str", - body={ - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_delete_thread(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.delete_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_message(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_message( - thread_id="str", - body={ - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - }, - role="str", - content="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_messages(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_messages( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_message(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_message( - thread_id="str", - message_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_update_message(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.update_message( - thread_id="str", - message_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_run( - thread_id="str", - body={ - "assistant_id": "str", - "additional_instructions": "str", - "additional_messages": [ - { - "assistant_id": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "completed_at": "2020-02-20 00:00:00", - "content": ["message_content"], - "created_at": "2020-02-20 00:00:00", - "id": "str", - "incomplete_at": "2020-02-20 00:00:00", - "incomplete_details": {"reason": "str"}, - "metadata": {"str": "str"}, - "object": "thread.message", - "role": "str", - "run_id": "str", - "status": "str", - "thread_id": "str", - } - ], - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "tool_choice": "str", - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_runs(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_runs( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_update_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.update_run( - thread_id="str", - run_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_submit_tool_outputs_to_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.submit_tool_outputs_to_run( - thread_id="str", - run_id="str", - body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, - tool_outputs=[{"output": "str", "tool_call_id": "str"}], - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_cancel_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.cancel_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_thread_and_run(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_thread_and_run( - body={ - "assistant_id": "str", - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "thread": { - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - "tool_choice": "str", - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_run_step(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_run_step( - thread_id="str", - run_id="str", - step_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_run_steps(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_run_steps( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_files(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_files() - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_upload_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.upload_file( - body={"file": "filetype", "purpose": "str", "filename": "str"}, - file="filetype", - purpose="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_delete_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.delete_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_file_content(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_file_content( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_vector_stores(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_vector_stores() - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_vector_store(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_vector_store( - body={ - "chunking_strategy": "vector_store_chunking_strategy_request", - "expires_after": {"anchor": "str", "days": 0}, - "file_ids": ["str"], - "metadata": {"str": "str"}, - "name": "str", - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_vector_store(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_modify_vector_store(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.modify_vector_store( - vector_store_id="str", - body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_delete_vector_store(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.delete_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_vector_store_files(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_vector_store_files( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_vector_store_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_vector_store_file( - vector_store_id="str", - body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_vector_store_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_delete_vector_store_file(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.delete_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_create_vector_store_file_batch(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.create_vector_store_file_batch( - vector_store_id="str", - body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_ids=["str"], - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_get_vector_store_file_batch(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.get_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_cancel_vector_store_file_batch(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.cancel_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_agents_list_vector_store_file_batch_files(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.agents.list_vector_store_file_batch_files( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py deleted file mode 100644 index e893c51c1732..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_agents_operations_async.py +++ /dev/null @@ -1,607 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AzureAIPreparer -from testpreparer_async import AzureAIClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAzureAIAgentsOperationsAsync(AzureAIClientTestBaseAsync): - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_agent(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_agent( - body={ - "model": "str", - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - model="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_agents(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_agents() - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_agent(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_agent( - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_update_agent(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.update_agent( - assistant_id="str", - body={ - "description": "str", - "instructions": "str", - "metadata": {"str": "str"}, - "model": "str", - "name": "str", - "response_format": "str", - "temperature": 0.0, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_delete_agent(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.delete_agent( - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_thread(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_thread( - body={ - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_thread(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_update_thread(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.update_thread( - thread_id="str", - body={ - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_delete_thread(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.delete_thread( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_message(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_message( - thread_id="str", - body={ - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - }, - role="str", - content="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_messages(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_messages( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_message(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_message( - thread_id="str", - message_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_update_message(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.update_message( - thread_id="str", - message_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_run( - thread_id="str", - body={ - "assistant_id": "str", - "additional_instructions": "str", - "additional_messages": [ - { - "assistant_id": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "completed_at": "2020-02-20 00:00:00", - "content": ["message_content"], - "created_at": "2020-02-20 00:00:00", - "id": "str", - "incomplete_at": "2020-02-20 00:00:00", - "incomplete_details": {"reason": "str"}, - "metadata": {"str": "str"}, - "object": "thread.message", - "role": "str", - "run_id": "str", - "status": "str", - "thread_id": "str", - } - ], - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "tool_choice": "str", - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_runs(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_runs( - thread_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_update_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.update_run( - thread_id="str", - run_id="str", - body={"metadata": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_submit_tool_outputs_to_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.submit_tool_outputs_to_run( - thread_id="str", - run_id="str", - body={"tool_outputs": [{"output": "str", "tool_call_id": "str"}], "stream": bool}, - tool_outputs=[{"output": "str", "tool_call_id": "str"}], - ) - - # please add some check logic here by yourself - # ... 
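# Outside of recorded tests, submit_tool_outputs_to_run sits inside a polling
# loop: the run pauses with a "requires_action" status, the caller answers the
# required tool calls, and the run resumes. A sketch of that loop using the
# operations exercised above; the status strings and the
# required_action.submit_tool_outputs.tool_calls attribute path are
# assumptions based on the OpenAI-compatible run lifecycle:
import asyncio

async def drive_run(client, thread_id: str, run_id: str):
    run = await client.agents.get_run(thread_id=thread_id, run_id=run_id)
    while run.status in ("queued", "in_progress", "requires_action"):
        if run.status == "requires_action":
            calls = run.required_action.submit_tool_outputs.tool_calls
            # a real caller would dispatch each call to its function here
            outputs = [{"tool_call_id": c.id, "output": "42"} for c in calls]
            run = await client.agents.submit_tool_outputs_to_run(
                thread_id=thread_id, run_id=run_id, tool_outputs=outputs
            )
        else:
            await asyncio.sleep(1)
            run = await client.agents.get_run(thread_id=thread_id, run_id=run_id)
    return run  # terminal: "completed", "failed", "cancelled", or "expired"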
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_cancel_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.cancel_run( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_thread_and_run(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_thread_and_run( - body={ - "assistant_id": "str", - "instructions": "str", - "max_completion_tokens": 0, - "max_prompt_tokens": 0, - "metadata": {"str": "str"}, - "model": "str", - "response_format": "str", - "stream": bool, - "temperature": 0.0, - "thread": { - "messages": [ - { - "content": "str", - "role": "str", - "attachments": [{"file_id": "str", "tools": [{"type": "code_interpreter"}]}], - "metadata": {"str": "str"}, - } - ], - "metadata": {"str": "str"}, - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - }, - "tool_choice": "str", - "tool_resources": { - "code_interpreter": {"file_ids": ["str"]}, - "file_search": {"vector_store_ids": ["str"]}, - }, - "tools": ["tool_definition"], - "top_p": 0.0, - "truncation_strategy": {"type": "str", "last_messages": 0}, - }, - assistant_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_run_step(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_run_step( - thread_id="str", - run_id="str", - step_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_run_steps(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_run_steps( - thread_id="str", - run_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_files(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_files() - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_upload_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.upload_file( - body={"file": "filetype", "purpose": "str", "filename": "str"}, - file="filetype", - purpose="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_delete_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.delete_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_file( - file_id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_file_content(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_file_content( - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_vector_stores(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_vector_stores() - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_vector_store( - body={ - "chunking_strategy": "vector_store_chunking_strategy_request", - "expires_after": {"anchor": "str", "days": 0}, - "file_ids": ["str"], - "metadata": {"str": "str"}, - "name": "str", - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_modify_vector_store(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.modify_vector_store( - vector_store_id="str", - body={"expires_after": {"anchor": "str", "days": 0}, "metadata": {"str": "str"}, "name": "str"}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_delete_vector_store(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.delete_vector_store( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_vector_store_files(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_vector_store_files( - vector_store_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_vector_store_file( - vector_store_id="str", - body={"file_id": "str", "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... 
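# A single vector store file behaves the same way as a batch: ingestion runs
# asynchronously, and the file only becomes searchable once its status leaves
# "in_progress". A sketch of the poll that usually follows
# create_vector_store_file; the status values are assumptions based on the
# OpenAI-compatible vector store surface:
import asyncio

async def wait_for_ingestion(client, vector_store_id: str, file_id: str):
    vs_file = await client.agents.get_vector_store_file(
        vector_store_id=vector_store_id, file_id=file_id
    )
    while vs_file.status == "in_progress":
        await asyncio.sleep(1)
        vs_file = await client.agents.get_vector_store_file(
            vector_store_id=vector_store_id, file_id=file_id
        )
    return vs_file  # settled: "completed", "failed", or "cancelled"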
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_delete_vector_store_file(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.delete_vector_store_file( - vector_store_id="str", - file_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_create_vector_store_file_batch(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.create_vector_store_file_batch( - vector_store_id="str", - body={"file_ids": ["str"], "chunking_strategy": "vector_store_chunking_strategy_request"}, - file_ids=["str"], - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_get_vector_store_file_batch(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.get_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_cancel_vector_store_file_batch(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.cancel_vector_store_file_batch( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_agents_list_vector_store_file_batch_files(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.agents.list_vector_store_file_batch_files( - vector_store_id="str", - batch_id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py deleted file mode 100644 index bc8f590aa3cb..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations.py +++ /dev/null @@ -1,146 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import AzureAIClientTestBase, AzureAIPreparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAzureAIEvaluationsOperations(AzureAIClientTestBase): - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_create(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.create( - evaluation={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_list(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_update(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.update( - id="str", - update_request={"description": "str", "displayName": "str", "tags": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_get(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.get( - id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_create_schedule(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.create_schedule( - body={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "samplingStrategy": {"rate": 0.0}, - "cronExpression": "str", - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "recurrence": { - "frequency": "str", - "interval": 0, - "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]}, - }, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_get_schedule(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.get_schedule( - id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_list_schedules(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.list_schedules() - result = [r for r in response] - # please add some check logic here by yourself - # ... 
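# The list operations above return pageable iterators rather than plain lists,
# which is why the generated tests materialize them with a comprehension
# before checking anything. A sketch of the check logic the placeholder
# comments call for, assuming each schedule model carries a server-assigned
# `id` as the request bodies above suggest:

def check_schedules(client):
    schedules = list(client.evaluations.list_schedules())
    for schedule in schedules:
        assert schedule.id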
- - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_list_schedule_evaluations(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.list_schedule_evaluations( - id="str", - ) - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy - def test_evaluations_delete_schedule(self, azureai_endpoint): - client = self.create_client(endpoint=azureai_endpoint) - response = client.evaluations.delete_schedule( - id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py deleted file mode 100644 index 8c1e28de6539..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_azure_ai_evaluations_operations_async.py +++ /dev/null @@ -1,147 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import AzureAIPreparer -from testpreparer_async import AzureAIClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestAzureAIEvaluationsOperationsAsync(AzureAIClientTestBaseAsync): - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_create(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.create( - evaluation={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_list(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = client.evaluations.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_update(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.update( - id="str", - update_request={"description": "str", "displayName": "str", "tags": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_get(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.get( - id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_create_schedule(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.create_schedule( - body={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "samplingStrategy": {"rate": 0.0}, - "cronExpression": "str", - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "recurrence": { - "frequency": "str", - "interval": 0, - "schedule": {"hours": [0], "minutes": [0], "monthDays": [0], "weekDays": ["str"]}, - }, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_get_schedule(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.get_schedule( - id="str", - ) - - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_list_schedules(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = client.evaluations.list_schedules() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_list_schedule_evaluations(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = client.evaluations.list_schedule_evaluations( - id="str", - ) - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @AzureAIPreparer() - @recorded_by_proxy_async - async def test_evaluations_delete_schedule(self, azureai_endpoint): - client = self.create_async_client(endpoint=azureai_endpoint) - response = await client.evaluations.delete_schedule( - id="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py deleted file mode 100644 index b3452e1889bf..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import ClientTestBase, Preparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestEndpointsOperations(ClientTestBase): - @Preparer() - @recorded_by_proxy - def test_endpoints_list(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.endpoints.list() - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy - def test_endpoints_list_secrets(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.endpoints.list_secrets( - connection_name_in_url="str", - body={ - "apiVersionInBody": "str", - "connectionName": "str", - "resourceGroupName": "str", - "subscriptionId": "str", - "workspaceName": "str", - }, - connection_name="str", - subscription_id="str", - resource_group_name="str", - workspace_name="str", - api_version_in_body="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py deleted file mode 100644 index 3bcdb63affc8..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_endpoints_operations_async.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import Preparer -from testpreparer_async import ClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestEndpointsOperationsAsync(ClientTestBaseAsync): - @Preparer() - @recorded_by_proxy_async - async def test_endpoints_list(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.endpoints.list() - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_endpoints_list_secrets(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.endpoints.list_secrets( - connection_name_in_url="str", - body={ - "apiVersionInBody": "str", - "connectionName": "str", - "resourceGroupName": "str", - "subscriptionId": "str", - "workspaceName": "str", - }, - connection_name="str", - subscription_id="str", - resource_group_name="str", - workspace_name="str", - api_version_in_body="str", - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py deleted file mode 100644 index 13e69dbd88c1..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import pytest -from devtools_testutils import recorded_by_proxy -from testpreparer import ClientTestBase, Preparer - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestEvaluationsOperations(ClientTestBase): - @Preparer() - @recorded_by_proxy - def test_evaluations_evaluations_get(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.evaluations.evaluations.get( - id="str", - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_evaluations_evaluations_create(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.evaluations.evaluations.create( - body={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_evaluations_evaluations_list(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.evaluations.evaluations.list() - result = [r for r in response] - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy - def test_evaluations_evaluations_update(self, _endpoint): - client = self.create_client(endpoint=_endpoint) - response = client.evaluations.evaluations.update( - id="str", - body={"description": "str", "displayName": "str", "tags": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py b/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py deleted file mode 100644 index 345d84c7d13d..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/test_evaluations_operations_async.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import pytest -from devtools_testutils.aio import recorded_by_proxy_async -from testpreparer import Preparer -from testpreparer_async import ClientTestBaseAsync - - -@pytest.mark.skip("you may need to update the auto-generated test case before run it") -class TestEvaluationsOperationsAsync(ClientTestBaseAsync): - @Preparer() - @recorded_by_proxy_async - async def test_evaluations_evaluations_get(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.evaluations.evaluations.get( - id="str", - ) - - # please add some check logic here by yourself - # ... 
- - @Preparer() - @recorded_by_proxy_async - async def test_evaluations_evaluations_create(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.evaluations.evaluations.create( - body={ - "data": "input_data", - "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}}, - "description": "str", - "displayName": "str", - "id": "str", - "properties": {"str": "str"}, - "status": "str", - "systemData": { - "createdAt": "2020-02-20 00:00:00", - "createdBy": "str", - "createdByType": "str", - "lastModifiedAt": "2020-02-20 00:00:00", - }, - "tags": {"str": "str"}, - }, - ) - - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_evaluations_evaluations_list(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = client.evaluations.evaluations.list() - result = [r async for r in response] - # please add some check logic here by yourself - # ... - - @Preparer() - @recorded_by_proxy_async - async def test_evaluations_evaluations_update(self, _endpoint): - client = self.create_async_client(endpoint=_endpoint) - response = await client.evaluations.evaluations.update( - id="str", - body={"description": "str", "displayName": "str", "tags": {"str": "str"}}, - ) - - # please add some check logic here by yourself - # ... diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer.py deleted file mode 100644 index 7230206c1f80..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/testpreparer.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from azure.ai.client import AzureAIClient -from devtools_testutils import AzureRecordedTestCase, PowerShellPreparer -import functools - - -class AzureAIClientTestBase(AzureRecordedTestCase): - - def create_client(self, endpoint): - credential = self.get_credential(AzureAIClient) - return self.create_client_from_credential( - AzureAIClient, - credential=credential, - endpoint=endpoint, - ) - - -AzureAIPreparer = functools.partial(PowerShellPreparer, "azureai", azureai_endpoint="https://fake_azureai_endpoint.com") diff --git a/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py b/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py deleted file mode 100644 index 85d0b79b37c3..000000000000 --- a/sdk/ai/azure-ai-client/generated_tests/testpreparer_async.py +++ /dev/null @@ -1,20 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# --------------------------------------------------------------------------
-from azure.ai.client.aio import AzureAIClient
-from devtools_testutils import AzureRecordedTestCase
-
-
-class AzureAIClientTestBaseAsync(AzureRecordedTestCase):
-
-    def create_async_client(self, endpoint):
-        credential = self.get_credential(AzureAIClient, is_async=True)
-        return self.create_client_from_credential(
-            AzureAIClient,
-            credential=credential,
-            endpoint=endpoint,
-        )
diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py
new file mode 100644
index 000000000000..e2d03086acf5
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py
@@ -0,0 +1,65 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_azure_openai_client_async.py
+
+DESCRIPTION:
+    Given an AzureAIClient, this sample demonstrates how to get an authenticated
+    AsyncAzureOpenAI client from the openai package.
+
+USAGE:
+    python sample_get_azure_openai_client_async.py
+
+    Before running the sample:
+
+    pip install azure.ai.client aiohttp openai
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import os
+import asyncio
+from azure.ai.client.aio import AzureAIClient
+from azure.identity import DefaultAzureCredential
+
+async def sample_get_azure_openai_client_async():
+
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # It should have the format "<endpoint>;<subscription_id>;<resource_group_name>;<workspace_name>"
+    async with AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
+    ) as ai_client:
+
+        # Or, you can create the Azure AI Client by giving all required parameters directly
+        # async with AzureAIClient(
+        #     credential=DefaultAzureCredential(),
+        #     endpoint=os.environ["AI_CLIENT_ENDPOINT"],
+        #     subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        #     resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        #     workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        # ) as ai_client:
+
+        # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection:
+        async with await ai_client.inference.get_azure_openai_client() as client:
+
+            response = await client.chat.completions.create(
+                model="gpt-4-0613",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "How many feet are in a mile?",
+                    },
+                ],
+            )
+
+            print(response.choices[0].message.content)
+
+async def main():
+    await sample_get_azure_openai_client_async()
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py
index 9406ed579964..27201ddef947 100644
--- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py
+++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py
@@ -7,8 +7,8 @@
 FILE: sample_get_chat_completions_client_async.py
 
 DESCRIPTION:
-    This sample demonstrates how to get an authenticated ChatCompletionsClient
-    from the azure.ai.inference package, from an AzureAIClient.
+    Given an AzureAIClient, this sample demonstrates how to get an authenticated
+    async ChatCompletionsClient from the azure.ai.inference package.
 
 USAGE:
     python sample_get_chat_completions_client_async.py
@@ -17,8 +17,8 @@
 
     pip install azure.ai.client aiohttp azure-identity
 
-    Set the environment variables with your own values:
-    1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
 """
 import os
 import asyncio
diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py
new file mode 100644
index 000000000000..051ba27eb26d
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py
@@ -0,0 +1,63 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_embeddings_client_async.py
+
+DESCRIPTION:
+    Given an AzureAIClient, this sample demonstrates how to get an authenticated
+    async EmbeddingsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_embeddings_client_async.py
+
+    Before running the sample:
+
+    pip install azure.ai.client aiohttp azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import asyncio +import os +from azure.ai.client.aio import AzureAIClient +from azure.identity import DefaultAzureCredential + +async def sample_get_embeddings_client_async(): + + # Create an Azure AI Client from a connection string, copied from your AI Studio project. + # It should have the format ";;;" + async with AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + ) as ai_client: + + # Or, you can create the Azure AI Client by giving all required parameters directly + # async with AzureAIClient( + # credential=DefaultAzureCredential(), + # endpoint=os.environ["AI_CLIENT_ENDPOINT"], + # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + # ) as ai_client: + + # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: + async with await ai_client.inference.get_embeddings_client() as client: + + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) + + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + + +async def main(): + await sample_get_embeddings_client_async() + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py index 82ffeecdd8ef..73f8c00ccd6e 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py @@ -2,6 +2,24 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ + +""" +FILE: sample_get_azure_openai_client.py + +DESCRIPTION: + Given an AzureAIClient, this sample demonstrates how to get an authenticated + AsyncAzureOpenAI client from the azure.ai.inference package. + +USAGE: + python sample_get_azure_openai_client.py + + Before running the sample: + + pip install azure.ai.client openai + + Set this environment variable with your own values: + AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" import os from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py index 07613f1ff1a8..20ac52c8dd0a 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py @@ -2,6 +2,24 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ + +""" +FILE: sample_get_chat_completions_client.py + +DESCRIPTION: + Given an AzureAIClient, this sample demonstrates how to get an authenticated + async ChatCompletionsClient from the azure.ai.inference package. 
+
+USAGE:
+    python sample_get_chat_completions_client.py
+
+    Before running the sample:
+
+    pip install azure.ai.client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
 import os
 from azure.ai.client import AzureAIClient
 from azure.ai.inference.models import UserMessage
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
index e0b6bf7d6e81..cc6a306685ad 100644
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
+++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
@@ -2,6 +2,24 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # ------------------------------------
+
+"""
+FILE: sample_get_embeddings_client.py
+
+DESCRIPTION:
+    Given an AzureAIClient, this sample demonstrates how to get an authenticated
+    EmbeddingsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_embeddings_client.py
+
+    Before running the sample:
+
+    pip install azure.ai.client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
 import os
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
From 37405e0902f125ea5bf0bdaca35005a6747d632c Mon Sep 17 00:00:00 2001
From: howieleung
Date: Thu, 10 Oct 2024 16:24:35 -0700
Subject: [PATCH 022/138] migrate async, poll functions, and samples from
 assistant to agent (#37831)

* migrate async, poll functions, and samples from assistant to agent
* migrate async, poll functions, and samples from assistant to agent
* resolved comments
* resolve comments
* using print instead of logging
* moved ai_client to main function
* Resolved comment
* Fixed samples
---
 .../azure/ai/client/aio/operations/_patch.py  | 1135 ++++++++++++++++-
 .../azure/ai/client/operations/_patch.py      |  225 ++++
 .../sample_agents_basics_async.py             |   88 ++
 .../sample_agents_functions_async.py          |  122 ++
 ...sample_agents_stream_eventhandler_async.py |  111 ++
 ..._stream_eventhandler_with_toolset_async.py |  160 +++
 .../sample_agents_stream_iteration_async.py   |  104 ++
 ...gents_with_file_search_attachment_async.py |  102 ++
 .../async_samples/user_async_functions.py     |   28 +
 .../samples/agents/product_info_1.md          |   51 +
 .../samples/agents/sample_agents_basics.py    |   61 +-
 ...mple_agents_code_interpreter_attachment.py |   90 ++
 .../agents/sample_agents_file_search.py       |  107 ++
 .../samples/agents/sample_agents_functions.py |  133 +-
 .../agents/sample_agents_run_with_toolset.py  |   70 +-
 .../sample_agents_stream_eventhandler.py      |   55 +-
 ...agents_stream_eventhandler_with_toolset.py |  154 +++
 .../agents/sample_agents_stream_iteration.py  |   86 +-
 ...le_agents_stream_iteration_with_toolset.py |  152 +++
 ...mple_agents_with_file_search_attachment.py |   99 ++
 20 files changed, 2976 insertions(+), 157 deletions(-)
 create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
 create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
 create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
 create mode 100644
sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/product_info_1.md create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 950d65855134..66c3c87bc38a 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -6,15 +6,27 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +from argparse import FileType +import io import logging -from typing import List, AsyncIterable +import os +import time +from typing import IO, Any, Dict, List, AsyncIterable, MutableMapping, Optional, Union, overload + +from azure.ai.client import _types from ._operations import EndpointsOperations as EndpointsOperationsGenerated +from ._operations import AgentsOperations as AgentsOperationsGenerated from ...models._patch import EndpointProperties -from ...models._enums import AuthenticationType, EndpointType +from ...models._enums import AuthenticationType, EndpointType, FilePurpose from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from ... import models as _models +from azure.core.tracing.decorator_async import distributed_trace_async logger = logging.getLogger(__name__) +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() + class InferenceOperations: @@ -211,8 +223,1127 @@ async def list( else: yield await self.get(endpoint_name=connection.name, populate_secrets=True) +class AgentsOperations(AgentsOperationsGenerated): + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.client.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. 
+ :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with toolset. + + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: A description for the new agent. Default value is None. + :paramtype description: str + :keyword instructions: System instructions for the agent. Default value is None. + :paramtype instructions: str + :keyword toolset: Collection of tools (alternative to `tools` and `tool_resources`). Default + value is None. + :paramtype toolset: ~azure.ai.client.models.ToolSet + :keyword temperature: Sampling temperature for generating agent responses. Default value + is None. + :paramtype temperature: float + :keyword top_p: Nucleus sampling parameter. Default value is None. + :paramtype top_p: float + :keyword response_format: Response format for tool calls. Default value is None. + :paramtype response_format: ~azure.ai.client.models.AgentsApiResponseFormatOption + :keyword metadata: Key/value pairs for storing additional information. Default value is None. + :paramtype metadata: dict[str, str] + :return: An Agent object. + :rtype: ~azure.ai.client.models.Agent + :raises: ~azure.core.exceptions.HttpResponseError + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. + :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools (alternative to `tools` and `tool_resources`). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. 
+ :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, io.IOBase): + return await super().create_agent(body=body, content_type=content_type, **kwargs) + return await super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return await super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.ToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. + :rtype: ~azure.ai.client.models.ToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. 
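Taken together, the overloads above resolve body-first, then fall back to keyword arguments, with `toolset` cached on the operations object before being expanded into `tools` and `tool_resources`. A minimal usage sketch of the keyword path (the model name and environment variable follow the samples added later in this patch; everything else is illustrative):

```python
import asyncio
import os

from azure.ai.client.aio import AzureAIClient
from azure.identity import DefaultAzureCredential


async def main() -> None:
    # Build the client the same way the samples in this patch do.
    ai_client = AzureAIClient.from_connection_string(
        credential=DefaultAzureCredential(),
        connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
    )
    async with ai_client:
        # Keyword overload: `model` is required when no `body` is passed.
        agent = await ai_client.agents.create_agent(
            model="gpt-4-1106-preview",
            name="my-assistant",
            instructions="You are a helpful assistant",
        )
        print(f"Created agent, agent ID: {agent.id}")
        await ai_client.agents.delete_agent(agent.id)


asyncio.run(main())
```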
This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.client.models.AgentsApiResponseFormatMode or + ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. 
+ :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.client.models.AgentsApiResponseFormatMode or + ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=stream, + stream=stream, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + if stream: + return _models.AsyncAgentRunStream(await response, event_handler) + else: + return await response + + @distributed_trace_async + async def create_and_process_run( + self, + thread_id: str, + assistant_id: str, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + sleep_interval: int = 1, + **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Creates a new run for an agent thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. 
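Note how the dispatch above only wraps the response in `AsyncAgentRunStream` when `stream=True`; otherwise the awaited `ThreadRun` is returned directly. A sketch of both call shapes, continuing the client from the earlier sketch (`thread` and `agent` are assumed to already exist):

```python
# Inside `async with ai_client:` from the earlier sketch.

# Non-streaming: returns a ThreadRun that can be polled.
run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
print(f"Created run, status: {run.status}")

# Streaming: the same call with stream=True yields an AsyncAgentRunStream,
# consumed via the event-handler / iteration patterns in the stream samples
# listed at the top of this patch.
stream = await ai_client.agents.create_run(
    thread_id=thread.id,
    assistant_id=agent.id,
    stream=True,
)
```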
Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or + ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or + ~azure.ai.client.models.AgentsApiResponseFormatMode or + ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :keyword sleep_interval: The time in seconds to wait between polling the service for run status. + Default value is 1. + :paramtype sleep_interval: int + :return: str or AsyncAgentRunStream. 
The run completion status if streaming is disabled, otherwise
+         the AsyncAgentRunStream object.
+        :rtype: str or ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Create and initiate the run with additional parameters
+        run = await self.create_run(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            tools=tools,
+            stream=stream,
+            temperature=temperature,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            truncation_strategy=truncation_strategy,
+            tool_choice=tool_choice,
+            response_format=response_format,
+            metadata=metadata,
+            event_handler=event_handler,
+            **kwargs
+        )
+
+        # Return the run stream object if streaming is enabled
+        if stream:
+            return run
+
+        # Monitor and process the run status; a non-blocking sleep keeps the
+        # event loop free between polls
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            await asyncio.sleep(sleep_interval)
+            run = await self.get_run(thread_id=thread_id, run_id=run.id)
+
+            if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    logger.warning("No tool calls provided - cancelling run")
+                    await self.cancel_run(thread_id=thread_id, run_id=run.id)
+                    break
+
+                toolset = self.get_toolset()
+                if toolset:
+                    tool_outputs = await toolset.execute_tool_calls(tool_calls)
+                else:
+                    raise ValueError("Toolset is not available in the client.")
+
+                logger.info("Tool outputs: %s", tool_outputs)
+                if tool_outputs:
+                    await self.submit_tool_outputs_to_run(
+                        thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            logger.info("Current run status: %s", run.status)
+
+        return run
+
+    @overload
+    async def submit_tool_outputs_to_run(
+        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def submit_tool_outputs_to_run(
+        self,
+        thread_id: str,
+        run_id: str,
+        *,
+        tool_outputs: List[_models.ToolOutput],
+        content_type: str = "application/json",
+        stream: Optional[bool] = None,
+        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+        **kwargs: Any
+    ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :keyword tool_outputs: Required.
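`create_and_process_run` folds the poll-and-submit loop into one call: it sleeps between polls and, when the run reaches `requires_action`, executes tool calls through the toolset cached by `create_agent`. A hedged sketch (assumes the agent was created with a `toolset`; without one, a `requires_action` run raises `ValueError`):

```python
# Inside `async with ai_client:` from the earlier sketch.
run = await ai_client.agents.create_and_process_run(
    thread_id=thread.id,
    assistant_id=agent.id,
    sleep_interval=2,  # poll every 2 seconds instead of the default 1
)
print(f"Run finished with status: {run.status}")
```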
+ :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream: Default value is None. + :paramtype stream: bool + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.client.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream: Optional[bool] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any + ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :param tool_outputs: List of tool outputs to submit. + :param stream: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :raises: HttpResponseError for HTTP errors. 
+ """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run( + thread_id, run_id, body, content_type=content_type, **kwargs + ) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=stream, stream=stream, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run( + thread_id, run_id, body, content_type=content_type, **kwargs + ) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + if stream: + return _models.AsyncAgentRunStream(await response, event_handler) + else: + return await response + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.client._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, file_path: str, *, purpose: str, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file( + self, + body: Union[JSON, None] = None, + *, + file: Union[FileType, None] = None, + file_path: Optional[str] = None, + purpose: Optional[Union[str, _models.FilePurpose]] = None, + filename: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :param kwargs: Additional parameters. + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + return await super().upload_file(body=body, **kwargs) + + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + if file is not None and purpose is not None: + return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, 'rb') as f: + content = f.read() + + # Determine filename and create correct FileType + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + + return await super().upload_file(file=file_content, purpose=purpose, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.client._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, file_path: str, *, purpose: str, sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. 
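The `file_path` branch above validates the path, reads the bytes, and forwards a `(filename, content)` tuple to the generated operation. A sketch of that path (the markdown file name comes from the samples directory in this patch; the relative path is an assumption):

```python
# Inside `async with ai_client:` from the earlier sketch.
uploaded = await ai_client.agents.upload_file(
    file_path="./product_info_1.md",
    purpose="assistants",  # one of the documented FilePurpose values
)
print(f"Uploaded file, file ID: {uploaded.id}")
```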
The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def upload_file_and_poll(
+        self,
+        body: Union[JSON, None] = None,
+        *,
+        file: Union[FileType, None] = None,
+        file_path: Optional[str] = None,
+        purpose: Optional[Union[str, _models.FilePurpose]] = None,
+        filename: Optional[str] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """
+        Uploads a file for use by other operations, delegating to the generated operations.
+
+        :param body: JSON. Required if `file` and `purpose` are not provided.
+        :param file: File content. Required if `body` and `purpose` are not provided.
+        :param file_path: Path to the file. Required if `body` and `purpose` are not provided.
+        :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided.
+        :param filename: The name of the file.
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :param kwargs: Additional parameters.
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :raises FileNotFoundError: If the file_path is invalid.
+        :raises IOError: If there are issues with reading the file.
+        :raises: HttpResponseError for HTTP errors.
+        """
+        uploaded_file = await self.upload_file(
+            body=body, file=file, file_path=file_path, purpose=purpose, filename=filename, **kwargs
+        )
+
+        # Poll with a non-blocking sleep until the service reports a terminal status
+        while uploaded_file.status in ["uploaded", "pending", "running"]:
+            await asyncio.sleep(sleep_interval)
+            uploaded_file = await self.get_file(uploaded_file.id)
+
+        return uploaded_file
+
+    @overload
+    async def create_vector_store_and_poll(
+        self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+    ) -> _models.VectorStore:
+        """Creates a vector store and poll.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_vector_store_and_poll(
+        self,
+        *,
+        content_type: str = "application/json",
+        file_ids: Optional[List[str]] = None,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Creates a vector store and poll.
+
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+         ``file_search`` that can access files. Default value is None.
+        :paramtype file_ids: list[str]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires.
Default value is None. + :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @distributed_trace_async + async def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. 
:paramtype metadata: dict[str, str]
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        vector_store = await self.create_vector_store(
+            body=body,
+            file_ids=file_ids,
+            name=name,
+            expires_after=expires_after,
+            chunking_strategy=chunking_strategy,
+            metadata=metadata,
+            **kwargs
+        )
+        # Poll with a non-blocking sleep until the vector store leaves the "in_progress" state
+        while vector_store.status == "in_progress":
+            await asyncio.sleep(sleep_interval)
+            vector_store = await self.get_vector_store(vector_store.id)
+
+        return vector_store

 __all__: List[str] = [
+    "AgentsOperations",
     "EndpointsOperations",
     "InferenceOperations",
 ]  # Add all objects you want publicly available to users at this package level
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
index 99056fb76a51..ae2ffb5bbe56 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py
@@ -1127,6 +1127,231 @@ def upload_file(
         raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")

+    @overload
+    def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file_and_poll(
+        self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :keyword file: Required.
+        :paramtype file: ~azure.ai.client._vendor.FileType
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
+        :keyword filename: Default value is None.
+        :paramtype filename: str
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file_and_poll(
+        self, file_path: str, *, purpose: str, sleep_interval: float = 1, **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param file_path: Required.
+        :type file_path: str
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile.
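With the async polling helpers in place, a file upload plus vector store creation collapses to two awaited calls. A sketch continuing the earlier client (file path and store name are illustrative):

```python
# Inside `async with ai_client:` from the earlier sketch.
uploaded = await ai_client.agents.upload_file_and_poll(
    file_path="./product_info_1.md", purpose="assistants"
)
vector_store = await ai_client.agents.create_vector_store_and_poll(
    file_ids=[uploaded.id],
    name="my_vectorstore",
)
print(f"Vector store {vector_store.id} finished with status: {vector_store.status}")
```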
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.client.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file_and_poll( + self, + body: Union[JSON, None] = None, + *, + file: Union[FileType, None] = None, + file_path: Optional[str] = None, + purpose: Optional[Union[str, _models.FilePurpose]] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :param kwargs: Additional parameters. + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + file = self.upload_file(body=body, file=file, file_path=file_path, purpose=purpose, filename=filename, **kwargs) + + while file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + file = self.get_file(file.id) + + return file + + @overload + def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. 
+ :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + @distributed_trace + def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. 
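The synchronous `_patch.py` mirrors the async helpers one-for-one, using blocking `time.sleep` between polls, so the same flow works on the sync client. A sketch, assuming the sync `AzureAIClient` exposes the same `from_connection_string` factory and supports the context-manager protocol:

```python
import os

from azure.ai.client import AzureAIClient
from azure.identity import DefaultAzureCredential

with AzureAIClient.from_connection_string(
    credential=DefaultAzureCredential(),
    connection=os.environ["AI_CLIENT_CONNECTION_STRING"],
) as ai_client:
    uploaded = ai_client.agents.upload_file_and_poll(file_path="./product_info_1.md", purpose="assistants")
    vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[uploaded.id], name="my_vectorstore")
    print(f"Vector store {vector_store.id} status: {vector_store.status}")
```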
:paramtype sleep_interval: float
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        vector_store = self.create_vector_store(
+            body=body,
+            file_ids=file_ids,
+            name=name,
+            expires_after=expires_after,
+            chunking_strategy=chunking_strategy,
+            metadata=metadata,
+            **kwargs
+        )
+        while vector_store.status == "in_progress":
+            time.sleep(sleep_interval)
+            vector_store = self.get_vector_store(vector_store.id)
+
+        return vector_store
+
 __all__: List[str] = [
     "AgentsOperations",
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
new file mode 100644
index 000000000000..2e89732c94ff
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_basics_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.client.aio import AzureAIClient
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<WorkspaceName>"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variable
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional.
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
new file mode 100644
index 000000000000..2e89732c94ff
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_basics_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.client.aio import AzureAIClient
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+    """
+
+    async with ai_client:
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID: {message.id}")
+
+        run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+        # poll the run as long as the run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second without blocking the event loop
+            await asyncio.sleep(1)
+            run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            print(f"Run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
new file mode 100644
index 000000000000..4aeb19e4d894
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
@@ -0,0 +1,122 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with custom functions from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_functions_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.client.aio import AzureAIClient
+from azure.ai.client.models import AsyncFunctionTool
+from azure.identity import DefaultAzureCredential
+
+from user_async_functions import user_async_functions
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+    """
+
+    async with ai_client:
+        # Initialize assistant functions
+        functions = AsyncFunctionTool(functions=user_async_functions)
+
+        # Create agent
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        # Create thread for communication
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, ID: {thread.id}")
+
+        # Create and send message
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?")
+        print(f"Created message, ID: {message.id}")
+
+        # Create and run assistant task
+        run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, ID: {run.id}")
+
+        # Polling loop for run status
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            await asyncio.sleep(4)
+            run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    print("No tool calls provided - cancelling run")
+                    await ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                tool_outputs = []
+                for tool_call in tool_calls:
+                    output = await functions.execute(tool_call)
+                    tool_output = {
+                        "tool_call_id": tool_call.id,
+                        "output": output,
+                    }
+                    tool_outputs.append(tool_output)
+
+                print(f"Tool outputs: {tool_outputs}")
+                if tool_outputs:
+                    await ai_client.agents.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print(f"Current run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        # Delete the assistant when done
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted assistant")
+
+        # Fetch and log all messages
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
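Each entry appended to `tool_outputs` in the loop above uses the same two-key shape. A minimal sketch with illustrative values (a real `tool_call_id` comes from `tool_call.id` on the run's required action):

```python
# Illustrative tool output entry: the ID is hypothetical, and "output" is the
# string returned by the executed user function.
tool_output = {
    "tool_call_id": "call_abc123",
    "output": '{"weather": "sunny", "temperature": "22C"}',
}
tool_outputs = [tool_output]
```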
+""" +import asyncio +from typing import Any + +from azure.ai.client.aio import AzureAIClient +from azure.ai.client.models._models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun +from azure.ai.client.models._patch import AsyncAgentEventHandler +from azure.identity import DefaultAzureCredential + +import os + +class MyEventHandler(AsyncAgentEventHandler): + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + async def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + async def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + async def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + async def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + async def on_done(self) -> None: + print("Stream completed.") + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +async def main(): + # Create an Azure AI Client from a connection string, copied from your AI Studio project. + # At the moment, it should be in the format ";;;" + # Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + + ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, + ) + + # Or, you can create the Azure AI Client by giving all required parameters directly + """ + ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. 
+    )
+    """
+
+    async with ai_client:
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID {message.id}")
+
+        async with await ai_client.agents.create_and_process_run(
+            thread_id=thread.id,
+            assistant_id=agent.id,
+            stream=True,
+            event_handler=MyEventHandler()
+        ) as stream:
+            await stream.until_done()
+
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted assistant")
+
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
new file mode 100644
index 000000000000..86c7ec596e09
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
@@ -0,0 +1,160 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_with_toolset_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler and toolset from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_toolset_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+from typing import Any
+
+from azure.ai.client.aio import AzureAIClient
+from azure.ai.client.aio.operations import AgentsOperations
+from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun
+from azure.ai.client.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet
+from azure.identity import DefaultAzureCredential
+
+from user_async_functions import user_async_functions
+
+
+class MyEventHandler(AsyncAgentEventHandler):
+
+    def __init__(self, agents: AgentsOperations) -> None:
+        self._agents = agents
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            await self._handle_submit_tool_outputs(run)
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+    async def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None:
+        tool_calls = run.required_action.submit_tool_outputs.tool_calls
+        if not tool_calls:
+            print("No tool calls to execute.")
+            return
+        if not self._agents:
+            print("Agents operations not set. Cannot execute tool calls using the toolset.")
+            return
+
+        toolset = self._agents.get_toolset()
+        if toolset:
+            tool_outputs = await toolset.execute_tool_calls(tool_calls)
+        else:
+            raise ValueError("Toolset is not available in the client.")
+
+        print(f"Tool outputs: {tool_outputs}")
+        if tool_outputs:
+            async with await self._agents.submit_tool_outputs_to_run(
+                thread_id=run.thread_id,
+                run_id=run.id,
+                tool_outputs=tool_outputs,
+                stream=True,
+                event_handler=self
+            ) as stream:
+                await stream.until_done()
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+    """
+
+    # Initialize toolset with user functions
+    functions = AsyncFunctionTool(user_async_functions)
+    toolset = AsyncToolSet()
+    toolset.add(functions)
+
+    async with ai_client:
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", toolset=toolset
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details")
+        print(f"Created message, message ID {message.id}")
+
+        async with await ai_client.agents.create_and_process_run(
+            thread_id=thread.id,
+            assistant_id=agent.id,
+            stream=True,
+            event_handler=MyEventHandler(ai_client.agents)
+        ) as stream:
+            await stream.until_done()
+
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted assistant")
+
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
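Two tool-wiring styles appear across these async samples. A hedged side-by-side sketch, reusing the `ai_client`, `functions`, and `toolset` objects created as above:

```python
# Style 1 (sample_agents_functions_async.py): pass raw definitions and
# execute each tool call yourself in your own polling loop.
agent = await ai_client.agents.create_agent(
    model="gpt-4-1106-preview", name="my-assistant",
    instructions="You are helpful assistant", tools=functions.definitions,
)

# Style 2 (this sample): register a toolset, so execute_tool_calls can run
# the calls for you, as _handle_submit_tool_outputs does above.
agent = await ai_client.agents.create_agent(
    model="gpt-4-1106-preview", name="my-assistant",
    instructions="You are helpful assistant", toolset=toolset,
)
```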
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py
new file mode 100644
index 000000000000..69f50d7cd009
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py
@@ -0,0 +1,104 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with iteration in streaming from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.client.aio import AzureAIClient
+from azure.ai.client.models import AgentStreamEvent
+from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+    """
+
+    async with ai_client:
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID {message.id}")
+
+        async with await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream:
+            async for event_type, event_data in stream:
+
+                if isinstance(event_data, MessageDeltaChunk):
+                    for content_part in event_data.delta.content:
+                        if isinstance(content_part, MessageDeltaTextContent):
+                            text_value = content_part.text.value if content_part.text else "No text"
+                            print(f"Text delta received: {text_value}")
+
+                elif isinstance(event_data, ThreadMessage):
+                    print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+                elif isinstance(event_data, ThreadRun):
+                    print(f"ThreadRun status: {event_data.status}")
+
+                elif isinstance(event_data, RunStep):
+                    print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+                elif event_type == AgentStreamEvent.ERROR:
+                    print(f"An error occurred. Data: {event_data}")
+
+                elif event_type == AgentStreamEvent.DONE:
+                    print("Stream completed.")
+                    break
+
+                else:
+                    print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted assistant")
+
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
new file mode 100644
index 000000000000..2e5df870ed82
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
@@ -0,0 +1,102 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_with_file_search_attachment_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to create messages with file search attachments from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_with_file_search_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.client.aio import AzureAIClient
+from azure.ai.client.models import FilePurpose
+from azure.ai.client.models import FileSearchToolDefinition, FileSearchToolResource, MessageAttachment, ToolResources
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+    ai_client = AzureAIClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        connection=connection_string,
+    )
+
+    # Or, you can create the Azure AI Client by giving all required parameters directly
+    """
+    ai_client = AzureAIClient(
+        credential=DefaultAzureCredential(),
+        host_name=os.environ["AI_CLIENT_HOST_NAME"],
+        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+    """
+
+    async with ai_client:
+        # upload a file and wait for it to be processed
+        file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4)
+
+        # create a vector store with the file and wait for it to be processed
+        # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after it was last active
+        vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store", sleep_interval=4)
+
+        file_search_tool = FileSearchToolDefinition()
+
+        # note that the FileSearchToolDefinition tool must be added, or the assistant will be unable to search the file
+        # also, you do not need to provide tool_resources if you did not create a vector store above
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant",
+            tools=[file_search_tool],
+            tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id]))
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await ai_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        # create a message with the attachment
+        attachment = MessageAttachment(file_id=file.id, tools=[file_search_tool])
+        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment])
+        print(f"Created message, message ID: {message.id}")
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4)
+        print(f"Created run, run ID: {run.id}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await ai_client.agents.delete_file(file.id)
+        print("Deleted file")
+
+        await ai_client.agents.delete_vector_store(vector_store.id)
+        print("Deleted vector store")
+
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
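The comment in the sample above mentions the default seven-day expiration; passing `expires_after` overrides it. In the sketch below, the `VectorStoreExpirationPolicy` field names are assumptions for illustration only, not a confirmed API:

```python
from azure.ai.client.models import VectorStoreExpirationPolicy

# The "anchor" and "days" field names are assumed for illustration only.
expires = VectorStoreExpirationPolicy(anchor="last_active_at", days=3)
vector_store = await ai_client.agents.create_vector_store_and_poll(
    file_ids=[file.id], name="sample_vector_store",
    expires_after=expires, sleep_interval=4,
)
```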
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py
new file mode 100644
index 000000000000..66843dc43fe0
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py
@@ -0,0 +1,28 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import asyncio
+import os
+import sys
+
+
+# Add the parent directory to sys.path to import user_functions
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.abspath(os.path.join(current_dir, '..'))
+if parent_dir not in sys.path:
+    sys.path.insert(0, parent_dir)
+from user_functions import fetch_current_datetime, fetch_weather, send_email
+
+
+async def send_email_async(recipient: str, subject: str, body: str) -> str:
+    await asyncio.sleep(1)
+    return send_email(recipient, subject, body)
+
+
+# Statically defined map of user functions: send_email is wrapped as async, the rest stay sync
+user_async_functions = {
+    "fetch_current_datetime": fetch_current_datetime,
+    "fetch_weather": fetch_weather,
+    "send_email": send_email_async,
+}
diff --git a/sdk/ai/azure-ai-client/samples/agents/product_info_1.md b/sdk/ai/azure-ai-client/samples/agents/product_info_1.md
new file mode 100644
index 000000000000..041155831d53
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/product_info_1.md
@@ -0,0 +1,51 @@
+# Information about product item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6. Safety Tips
+Safety guidelines for public and private spaces
+
+### 7. Troubleshooting
+Quick fixes for common issues
+
+## Warranty Information
+Two-year limited warranty on all electronic components
+
+## Contact Information
+Customer Support at support@contoso-galaxy-innovations.com
+
+## Return Policy
+30-day return policy with no questions asked
+
+## FAQ
+- How to sync your SmartView Glasses with your devices
+- Troubleshooting connection issues
+- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py
index 8c11a30de0b0..e3df8d7aa06b 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py
@@ -3,7 +3,25 @@
 # Licensed under the MIT License.
 # ------------------------------------
 
-import os, time, logging
+"""
+FILE: sample_agents_basics.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_basics.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os, time
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
 
@@ -29,32 +47,31 @@
     logging_enable=True,  # Optional.
Remove this line if you don't want to show how to enable logging
 )
 """
 
-agent = ai_client.agents.create_agent(
-    model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
-)
-print("Created agent, agent ID", agent.id)
-thread = ai_client.agents.create_thread()
-print("Created thread, thread ID", thread.id)
+with ai_client:
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+    )
+    print(f"Created agent, agent ID: {agent.id}")
 
-message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-print("Created message, message ID", message.id)
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
 
-run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-print("Created run, run ID", run.id)
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID: {message.id}")
 
-# poll the run as long as run status is queued or in progress
-while run.status in ["queued", "in_progress", "requires_action"]:
-    # wait for a second
-    time.sleep(1)
-    run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+    run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
 
-    print("Run status:", run.status)
+    # poll the run as long as the run status is queued or in progress
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # wait for a second
+        time.sleep(1)
+        run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
 
-print("Run completed with status:", run.status)
+        print(f"Run status: {run.status}")
 
-ai_client.agents.delete_agent(agent.id)
-print("Deleted agent")
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
 
-messages = ai_client.agents.list_messages(thread_id=thread.id)
-print("messages:", messages)
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py
new file mode 100644
index 000000000000..7cfd3d89f47a
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py
@@ -0,0 +1,90 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_code_interpreter_attachment.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.client import AzureAIClient
+from azure.ai.client.models import CodeInterpreterTool
+from azure.ai.client.models import FilePurpose, MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+ai_client = AzureAIClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    connection=connection_string,
+)
+
+# Or, you can create the Azure AI Client by giving all required parameters directly
+"""
+ai_client = AzureAIClient(
+    credential=DefaultAzureCredential(),
+    host_name=os.environ["AI_CLIENT_HOST_NAME"],
+    subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+)
+"""
+
+with ai_client:
+    # upload a file and wait for it to be processed
+    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    code_interpreter = CodeInterpreterTool()
+    code_interpreter.add_file(file.id)
+
+    # note that the CodeInterpreterToolDefinition tool must be added, or the assistant will be unable to view the file
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant",
+        tools=[code_interpreter]
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # create a message with the attachment
+    attachment = MessageAttachment(file_id=file.id, tools=[code_interpreter])
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment])
+    print(f"Created message, message ID: {message.id}")
+
+    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4)
+    print(f"Created run, run ID: {run.id}")
+
+    ai_client.agents.delete_file(file.id)
+    print("Deleted file")
+
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted assistant")
+
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
+
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py
new file mode 100644
index 000000000000..599d9cb73696
--- /dev/null
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py
@@ -0,0 +1,107 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with file searching from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.client import AzureAIClient
+from azure.ai.client.models import FileSearchTool, ToolSet
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"]
+
+ai_client = AzureAIClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    connection=connection_string,
+)
+
+# Or, you can create the Azure AI Client by giving all required parameters directly
+"""
+ai_client = AzureAIClient(
+    credential=DefaultAzureCredential(),
+    host_name=os.environ["AI_CLIENT_HOST_NAME"],
+    subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+)
+"""
+
+with ai_client:
+    # Create file search tool
+    file_search = FileSearchTool()
+    openai_file = ai_client.agents.upload_file(file_path="product_info_1.md", purpose="assistants")
+    print(f"Uploaded file, file ID: {openai_file.id}")
+
+    openai_vectorstore = ai_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore")
+    print(f"Created vector store, vector store ID: {openai_vectorstore.id}")
+
+    file_search.add_vector_store(openai_vectorstore.id)
+
+    toolset = ToolSet()
+    toolset.add(file_search)
+
+    # Create agent with toolset and process assistant run
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", toolset=toolset
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    # Create thread for communication
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?")
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process assistant run in thread with tools
+    # Note: if the vector store was created just before this, you may need to poll the vector store status until it is ready for retrieval.
+    # This can be done by calling `ai_client.agents.get_vector_store(vector_store_id)` and checking its status; see the sketch after this file.
+    # We may want to add convenience around this.
+    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # If the error is "Rate limit is exceeded.", you may need more quota
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the vector store when done
+    ai_client.agents.delete_vector_store(openai_vectorstore.id)
+    print("Deleted vector store")
+
+    # Delete the assistant when done
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted assistant")
+
+    # Fetch and log all messages
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
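The readiness check referenced in the comments above, as a minimal sketch (the method names are the ones used elsewhere in this patch; the loop itself is illustrative):

```python
import time

# Poll until the vector store leaves the "in_progress" state, mirroring what
# create_vector_store_and_poll does internally.
store = ai_client.agents.get_vector_store(openai_vectorstore.id)
while store.status == "in_progress":
    time.sleep(1)
    store = ai_client.agents.get_vector_store(store.id)
print(f"Vector store status: {store.status}")
```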
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py
index b30d47c651e1..c2a9ee348fb6 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py
@@ -3,12 +3,30 @@
 # Licensed under the MIT License.
 # ------------------------------------
 
+"""
+FILE: sample_agents_functions.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with custom functions from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
 import os, time
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
 from azure.ai.client.models import FunctionTool
 from user_functions import user_functions
 
+
 # Create an Azure AI Client from a connection string, copied from your AI Studio project.
 # At the moment, it should be in the format ";;;"
 # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
@@ -35,60 +53,61 @@
 # Initialize function tool with user functions
 functions = FunctionTool(functions=user_functions)
 
-# Create an agent and run user's request with function calls
-agent = ai_client.agents.create_agent(
-    model="gpt-4-1106-preview",
-    name="my-assistant",
-    instructions="You are a helpful assistant",
-    tools=functions.definitions,
-)
-print(f"Created agent, ID: {agent.id}")
-
-thread = ai_client.agents.create_thread()
-print(f"Created thread, ID: {thread.id}")
-
-message = ai_client.agents.create_message(
-    thread_id=thread.id,
-    role="user",
-    content="Hello, send an email with the datetime and weather information in New York?",
-)
-print(f"Created message, ID: {message.id}")
-
-run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-print(f"Created run, ID: {run.id}")
-
-while run.status in ["queued", "in_progress", "requires_action"]:
-    time.sleep(1)
-    run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
-    if run.status == "requires_action" and run.required_action.submit_tool_outputs:
-        tool_calls = run.required_action.submit_tool_outputs.tool_calls
-        if not tool_calls:
-            print("No tool calls provided - cancelling run")
-            ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
-            break
-
-        tool_outputs = []
-        for tool_call in tool_calls:
-            output = functions.execute(tool_call)
-            tool_output = {
-                "tool_call_id": tool_call.id,
-                "output": output,
-            }
-            tool_outputs.append(tool_output)
-
-        print(f"Tool outputs: {tool_outputs}")
-        if tool_outputs:
-            ai_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs)
-
-    print(f"Current run status: {run.status}")
-
-print(f"Run completed with status: {run.status}")
-
-# Delete the agent when done
-ai_client.agents.delete_agent(agent.id)
-print("Deleted agent")
-
-# Fetch and log all messages
-messages = ai_client.agents.list_messages(thread_id=thread.id)
-print(f"Messages: {messages}")
+with ai_client:
+    # Create an agent and run the user's request with function calls
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=functions.definitions,
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    message = ai_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, ID: {run.id}")
+
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        time.sleep(1)
+        run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+        if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+            if not tool_calls:
+                print("No tool calls provided - cancelling run")
+                ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                break
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                output = functions.execute(tool_call)
+                tool_output = {
+                    "tool_call_id": tool_call.id,
+                    "output": output,
+                }
+                tool_outputs.append(tool_output)
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                ai_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs)
+
+        print(f"Current run status: {run.status}")
+
+    print(f"Run completed with status: {run.status}")
+
+    # Delete the agent when done
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py
index b8e3a77777a0..34ee7dc290f7 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py
@@ -3,12 +3,31 @@
 # Licensed under the MIT License.
 # ------------------------------------
 
+"""
+FILE: sample_agents_run_with_toolset.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with a toolset from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_run_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
 import os
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
 from azure.ai.client.models import FunctionTool, ToolSet, CodeInterpreterTool
 from user_functions import user_functions
 
+
 # Create an Azure AI Client from a connection string, copied from your AI Studio project.
 # At the moment, it should be in the format ";;;"
 # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
 
@@ -41,34 +60,35 @@
 toolset.add(code_interpreter)
 
 # Create agent with toolset and process assistant run
-agent = ai_client.agents.create_agent(
-    model="gpt-4o-mini", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
-)
-print(f"Created agent, ID: {agent.id}")
+with ai_client:
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
 
-# Create thread for communication
-thread = ai_client.agents.create_thread()
-print(f"Created thread, ID: {thread.id}")
+    # Create thread for communication
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
 
-# Create message to thread
-message = ai_client.agents.create_message(
-    thread_id=thread.id,
-    role="user",
-    content="Hello, send an email with the datetime and weather information in New York?",
-)
-print(f"Created message, ID: {message.id}")
+    # Create message to thread
+    message = ai_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York?",
+    )
+    print(f"Created message, ID: {message.id}")
 
-# Create and process agent run in thread with tools
-run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-print(f"Run finished with status: {run.status}")
+    # Create and process agent run in thread with tools
+    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
 
-if run.status == "failed":
-    print(f"Run failed: {run.last_error}")
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
 
-# Delete the assistant when done
-ai_client.agents.delete_agent(agent.id)
-print("Deleted agent")
+    # Delete the assistant when done
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
 
-# Fetch and log all messages
-messages = ai_client.agents.list_messages(thread_id=thread.id)
-print(f"Messages: {messages}")
+    # Fetch and log all messages
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py
index d785f8eda61a..e11d1628bc4e 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py
@@ -3,6 +3,24 @@
 # Licensed under the MIT License.
 # ------------------------------------
 
+"""
+FILE: sample_agents_stream_eventhandler.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" + import os from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential @@ -18,6 +36,7 @@ from typing import Any + # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables @@ -41,7 +60,6 @@ ) """ - class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: @@ -68,25 +86,26 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -# Create an agent and run stream with event handler -agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" -) -print(f"Created agent, agent ID {agent.id}") +with ai_client: + # Create an agent and run stream with event handler + agent = ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created agent, agent ID {agent.id}") -thread = ai_client.agents.create_thread() -print(f"Created thread, thread ID {thread.id}") + thread = ai_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") -message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") -print(f"Created message, message ID {message.id}") + message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") -with ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id, stream=True, event_handler=MyEventHandler() -) as stream: - stream.until_done() + with ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id, stream=True, event_handler=MyEventHandler() + ) as stream: + stream.until_done() -ai_client.agents.delete_agent(agent.id) -print("Deleted agent") + ai_client.agents.delete_agent(agent.id) + print("Deleted agent") -messages = ai_client.agents.list_messages(thread_id=thread.id) -print(f"Messages: {messages}") + messages = ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py new file mode 100644 index 000000000000..b9274118f35b --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -0,0 +1,154 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_toolset.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_eventhandler_with_toolset.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set this environment variables with your own values: + AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+""" + +import os +from azure.ai.client import AzureAIClient +from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import AgentEventHandler +from azure.ai.client.operations._patch import AgentsOperations +from azure.identity import DefaultAzureCredential +from azure.ai.client.models import FunctionTool, ToolSet + + +import os +from typing import Any + +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging +) +""" + + +class MyEventHandler(AgentEventHandler): + + def __init__(self, agents: AgentsOperations): + self._agents = agents + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + if run.status == "failed": + print(f"Run failed. Error: {run.last_error}") + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + self._handle_submit_tool_outputs(run) + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. 
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+    def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None:
+        tool_calls = run.required_action.submit_tool_outputs.tool_calls
+        if not tool_calls:
+            print("No tool calls to execute.")
+            return
+
+        toolset = self._agents.get_toolset()
+        if toolset:
+            tool_outputs = toolset.execute_tool_calls(tool_calls)
+        else:
+            raise ValueError("Toolset is not available in the client.")
+
+        print(f"Tool outputs: {tool_outputs}")
+        if tool_outputs:
+            with self._agents.submit_tool_outputs_to_run(
+                thread_id=run.thread_id,
+                run_id=run.id,
+                tool_outputs=tool_outputs,
+                stream=True,
+                event_handler=self
+            ) as stream:
+                stream.until_done()
+
+
+functions = FunctionTool(user_functions)
+toolset = ToolSet()
+toolset.add(functions)
+
+
+with ai_client:
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID {thread.id}")
+
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details")
+    print(f"Created message, message ID {message.id}")
+
+    with ai_client.agents.create_and_process_run(
+        thread_id=thread.id,
+        assistant_id=agent.id,
+        stream=True,
+        event_handler=MyEventHandler(ai_client.agents)
+    ) as stream:
+        stream.until_done()
+
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted assistant")
+
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
index 185e1c775283..416c2dfe3c31 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
@@ -3,6 +3,24 @@
 # Licensed under the MIT License.
 # ------------------------------------
 
+"""
+FILE: sample_agents_stream_iteration.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with iteration in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration.py
+
+    Before running the sample:
+
+    pip install azure-ai-client azure-identity
+
+    Set this environment variable with your own value:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
 import os
 from azure.ai.client import AzureAIClient
 from azure.identity import DefaultAzureCredential
@@ -15,6 +33,7 @@
 RunStep,
 )
 
+
 # Create an Azure AI Client from a connection string, copied from your AI Studio project.
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables @@ -38,49 +57,50 @@ ) """ -# Create an agent and run stream with iteration -agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" -) -print(f"Created agent, ID {agent.id}") +with ai_client: + # Create an agent and run stream with iteration + agent = ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created agent, ID {agent.id}") -thread = ai_client.agents.create_thread() -print(f"Created thread, thread ID {thread.id}") + thread = ai_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") -message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") -print(f"Created message, message ID {message.id}") + message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") -with ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: + with ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: - for event_type, event_data in stream: + for event_type, event_data in stream: - if isinstance(event_data, MessageDeltaChunk): - for content_part in event_data.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") + if isinstance(event_data, MessageDeltaChunk): + for content_part in event_data.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. Data: {event_data}") + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -ai_client.agents.delete_agent(agent.id) -print("Deleted agent") + ai_client.agents.delete_agent(agent.id) + print("Deleted agent") -messages = ai_client.agents.list_messages(thread_id=thread.id) -print(f"Messages: {messages}") + messages = ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py new file mode 100644 index 000000000000..6033779f5184 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -0,0 +1,152 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_iteration_with_toolset.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset and iteration in streaming from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_stream_iteration_with_toolset.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set this environment variables with your own values: + AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.client import AzureAIClient +from azure.ai.client.models import AgentStreamEvent +from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import FunctionTool, ToolSet +from azure.ai.client.operations._operations import AgentsOperations +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. 
+# Function to handle tool stream iteration
+def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs):
+    try:
+        with operations.submit_tool_outputs_to_run(
+            thread_id=thread_id,
+            run_id=run_id,
+            tool_outputs=tool_outputs,
+            stream=True
+        ) as tool_stream:
+            for tool_event_type, tool_event_data in tool_stream:
+                if tool_event_type == AgentStreamEvent.ERROR:
+                    print(f"An error occurred in tool stream. Data: {tool_event_data}")
+                elif tool_event_type == AgentStreamEvent.DONE:
+                    print("Tool stream completed.")
+                    break
+                else:
+                    if isinstance(tool_event_data, MessageDeltaChunk):
+                        handle_message_delta(tool_event_data)
+
+    except Exception as e:
+        print(f"Failed to process tool stream: {e}")
+
+
+# Function to handle message delta chunks
+def handle_message_delta(delta: MessageDeltaChunk) -> None:
+    for content_part in delta.delta.content:
+        if isinstance(content_part, MessageDeltaTextContent):
+            text_value = content_part.text.value if content_part.text else "No text"
+            print(f"Text delta received: {text_value}")
+
+
+functions = FunctionTool(user_functions)
+toolset = ToolSet()
+toolset.add(functions)
+
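+# A ToolSet can aggregate more than one tool. For example, the tests added later in
+# this patch also register a code interpreter next to the function tool:
+#
+#     code_interpreter = CodeInterpreterTool()  # from azure.ai.client.models
+#     toolset.add(code_interpreter)
+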
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + ai_client.agents.delete_agent(agent.id) + print("Deleted assistant") + + messages = ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py new file mode 100644 index 000000000000..97de3ba1382e --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -0,0 +1,99 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_with_file_search_attachment.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to create messages with file search attachments from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_with_file_search_attachment.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set this environment variables with your own values: + AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.client import AzureAIClient +from azure.ai.client.models import FilePurpose +from azure.ai.client.models import FileSearchToolResource, MessageAttachment, ToolResources +from azure.ai.client.models import FileSearchTool, ToolSet +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] + +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + connection=connection_string, +) + +# Or, you can create the Azure AI Client by giving all required parameters directly +""" +ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. 
+with ai_client:
+
+    # upload a file and wait for it to be processed
+    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # create a vector store with the file and wait for it to be processed
+    # if you do not specify a vector store, create_message will create one with a default expiration policy of seven days after it was last active
+    vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store", sleep_interval=4)
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    file_search_tool = FileSearchTool()
+    file_search_tool.add_vector_store(vector_store.id)
+
+    # note that the file search tool must be passed in `tools`, or the agent will be unable to search the file
+    # also, you do not need to provide tool_resources if you did not create a vector store above
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant",
+        tools=[file_search_tool],
+        tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id]))
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # create a message with the attachment
+    attachment = MessageAttachment(file_id=file.id, tools=[file_search_tool.definitions])
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment])
+    print(f"Created message, message ID: {message.id}")
+
+    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4)
+    print(f"Created run, run ID: {run.id}")
+
+    ai_client.agents.delete_file(file.id)
+    print("Deleted file")
+
+    ai_client.agents.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")

From b2fa08ec76936778e6dfd595993f131f943f1cb2 Mon Sep 17 00:00:00 2001
From: Glenn Harper <64209257+glharper@users.noreply.github.com>
Date: Fri, 11 Oct 2024 05:58:00 -0700
Subject: [PATCH 023/138] [AI Client] adapt Sophia's assistant tests to client
 repo (#37819)

* [AI Client] adapt Sophia's assistant tests to client repo

* get tests running and passing with pytest

* recordings pass and pushed

* review feedback
---
 sdk/ai/azure-ai-client/assets.json        |    6 +
 sdk/ai/azure-ai-client/tests/README.md    |   79 ++
 .../tests/agents/test_agents_client.py    | 1085 +++++++++++++++++
 sdk/ai/azure-ai-client/tests/conftest.py  |   20 +
 4 files changed, 1190 insertions(+)
 create mode 100644 sdk/ai/azure-ai-client/assets.json
 create mode 100644 sdk/ai/azure-ai-client/tests/README.md
 create mode 100644 sdk/ai/azure-ai-client/tests/agents/test_agents_client.py
 create mode 100644 sdk/ai/azure-ai-client/tests/conftest.py

diff --git a/sdk/ai/azure-ai-client/assets.json b/sdk/ai/azure-ai-client/assets.json
new file mode 100644
index 000000000000..a2aabb4cbaf7
--- /dev/null
+++ b/sdk/ai/azure-ai-client/assets.json
@@ -0,0 +1,6 @@
+{
+  "AssetsRepo": "Azure/azure-sdk-assets",
+  "AssetsRepoPrefixPath": "python",
+  "TagPrefix": "python/ai/azure-ai-client",
+  "Tag": "python/ai/azure-ai-client_246b906947"
+}
"python/ai/azure-ai-client_246b906947" +} diff --git a/sdk/ai/azure-ai-client/tests/README.md b/sdk/ai/azure-ai-client/tests/README.md new file mode 100644 index 000000000000..76d76aee9c52 --- /dev/null +++ b/sdk/ai/azure-ai-client/tests/README.md @@ -0,0 +1,79 @@ +# copied from azure-ai-inference TODO update + +# Azure AI client library tests for Python + +The instructions below are for running tests locally, on a Windows machine, against the live service. + +## Prerequisites + +The live tests were written against the AI models mentioned below. You will need to deploy a gpt-4o model in the Azure OpenAI Studio, and have the endpoint and key for it: + +- `gpt-4o` on Azure OpenAI (AOAI), for Agents tests + +## Setup + +- Clone or download this sample repository. +- Open a command prompt window in the folder `sdk\ai\azure-ai-client`. +- If you want to run tests against the latest published client library, install it by running: + ```bash + pip install azure-ai-client + ``` +- If you want to run tests against a locally built client library: + - First build the wheel: + ```bash + pip install wheel + pip install -r dev_requirements.txt + python setup.py bdist_wheel + ``` + - Then install the resulting local wheel (update version `1.0.0b2` to the current one): + ```bash + pip install dist\azure_ai_client-1.0.0b1-py3-none-any.whl --user --force-reinstall + ``` + +## Set environment variables + +Here is the list of environment variables used by the tests: + +```bash +# For agents, including tools +set AZURE_AI_CLIENT_AGENTS_CONNECTION_STRING= +``` + + + +## Configure test proxy + +Configure the test proxy to run live service tests without recordings: + +```bash +set AZURE_TEST_RUN_LIVE=true +set AZURE_SKIP_LIVE_RECORDING=true +set PROXY_URL=http://localhost:5000 +set AZURE_TEST_USE_CLI_AUTH=true +``` + +## Run tests + +To run all tests, type: + +```bash +pytest +``` + +For windows run: + +```bash +python -m pytest tests\agents +``` + +## Additional information + +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py new file mode 100644 index 000000000000..361f7154d9bd --- /dev/null +++ b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py @@ -0,0 +1,1085 @@ +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+
+## Additional information
+
+See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py
new file mode 100644
index 000000000000..361f7154d9bd
--- /dev/null
+++ b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py
@@ -0,0 +1,1085 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import os
+import json
+import time
+import functools
+import datetime
+import logging
+import sys
+
+from azure.ai.client import AzureAIClient
+from azure.ai.client.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
+from azure.core.pipeline.transport import RequestsTransport
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
+from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
+from azure.identity import DefaultAzureCredential
+
+# TODO clean this up / get rid of anything not in use
+
+'''
+issues I've noticed with the code:
+    delete_thread(thread.id) fails
+    cancel_thread(thread.id) expires/times out occasionally
+    added time.sleep() to the beginning of my last few tests to avoid limits
+    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
+'''
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    'azure_ai_client',
+    azure_ai_client_agents_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    'azure_ai_client',
+    azure_ai_client_host_name="https://foo.bar.some-domain.ms",
+    azure_ai_client_subscription_id="00000000-0000-0000-0000-000000000000",
+    azure_ai_client_resource_group_name="rg-resour-cegr-oupfoo1",
+    azure_ai_client_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+
+# create tool for agent use
+def fetch_current_datetime_live():
+    """
+    Get the current time as a JSON string.
+
+    :return: The actual current time, for tests running against the live service.
+    :rtype: str
+    """
+    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time_json = json.dumps({"current_time": current_datetime})
+    return time_json
+
+# create tool for agent use
+def fetch_current_datetime_recordings():
+    """
+    Get the current time as a JSON string.
+
+    :return: Static time string so that test recordings work.
+ :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json + + +# Statically defined user functions for fast reference +user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings} +user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live} + + +# The test class name needs to start with "Test" to get collected by pytest +class TestagentClient(AzureRecordedTestCase): + + # helper function: create client and using environment variables + def create_client(self, **kwargs): + # fetch environment variables + connection_string = kwargs.pop("azure_ai_client_agents_connection_string") + credential = self.get_credential(AzureAIClient, is_async=False) + + # create and return client + client = AzureAIClient.from_connection_string( + credential=credential, + connection=connection_string, + ) + + return client + + # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list + """ + # NOTE: this test should not be run against a shared resource, as it will delete all agents + @agentClientPreparer() + @recorded_by_proxy + def test_clear_client(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # clear agent list + agents = client.agents.list_agents().data + for agent in agents: + client.agents.delete_agent(agent.id) + assert client.agents.list_agents().data.__len__() == 0 + + # close client + client.close() + """ + + +# # ********************************************************************************** +# # +# # UNIT TESTS +# # +# # ********************************************************************************** + + + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - agent APIs + # # + # # ********************************************************************************** + + # test client creation + @agentClientPreparer() + @recorded_by_proxy + def test_create_client(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # close client + client.close() + + # test agent creation and deletion + @agentClientPreparer() + @recorded_by_proxy + def test_create_delete_agent(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + print("Created client") + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test agent creation with tools + @agentClientPreparer() + @recorded_by_proxy + def test_create_agent_with_tools(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # initialize agent functions + functions = FunctionTool(functions=user_functions_recording) + + # create agent with tools + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions) + assert agent.id + print("Created agent, agent ID", agent.id) + assert agent.tools + assert agent.tools[0]['function']['name'] == functions.definitions[0]['function']['name'] + print("Tool successfully submitted:", 
functions.definitions[0]['function']['name']) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + @agentClientPreparer() + @recorded_by_proxy + def test_update_agent(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + + # update agent and confirm changes went through + agent.update(name="my-agent2", instructions="You are helpful agent") + assert agent.name + assert agent.name == "my-agent2" + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + DISABLED: can't perform consistently on shared resource + @agentClientPreparer() + @recorded_by_proxy + def test_agent_list(self, **kwargs): + # create client and ensure there are no previous agents + client = self.create_client(**kwargs) + list_length = client.agents.list_agents().data.__len__() + + # create agent and check that it appears in the list + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent.id + + # create second agent and check that it appears in the list + agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 2 + assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id + + # delete agents and check list + client.agents.delete_agent(agent.id) + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent2.id + + client.agents.delete_agent(agent2.id) + assert client.agents.list_agents().data.__len__() == list_length + print("Deleted agents") + + # close client + client.close() + """ + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + # test creating thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = client.agents.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + ''' + TODO what can I update a thread with? + # test updating thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + client.agents.update_thread(thread.id, ) # TODO what can we update it with? + assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + ''' + + ''' + # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working + # status_code = 404, response = + # error_map = {304: , 401: , 409: } + + # test deleting thread + @agentClientPreparer() + @recorded_by_proxy + def test_delete_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = client.agents.delete_thread(thread.id) + # assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + ''' + + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test creating multiple messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_multiple_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_list_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = client.agents.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = client.agents.create_message(thread_id=thread.id, role="user", 
content="Hello, tell me a joke") + assert message1.id + print("Created message, message ID", message1.id) + messages1 = client.agents.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + messages2 = client.agents.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + messages3 = client.agents.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert messages3.data[0].id == message3.id or messages3.data[1].id == message2.id or messages3.data[2].id == message2.id + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = client.agents.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + ''' + TODO format the updated body + # test updating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # update message + body_json = json.dumps # TODO format body into json -- figure out what the message looks like so I can update it (might be in that picture) + client.agents.update_message(thread_id=thread.id, message_id=message.id, body=) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + ''' + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + # # 
**********************************************************************************
+
+    # test creating run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AzureAIClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    # test getting run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_get_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AzureAIClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # get run
+        run2 = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+        assert run2.id
+        assert run.id == run2.id
+        print("Got run, run ID", run2.id)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    # TODO fix: sometimes this works and sometimes it doesn't
+    # test successful run status TODO test for cancelled/unsuccessful runs
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_run_status(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AzureAIClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status
+        assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Run status:", run.status)
+
+        assert run.status in ["cancelled", "failed", "completed", "expired"]
+        print("Run completed with status:", run.status)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    '''
+    # TODO another, but check that the number of runs decreases after cancelling runs
+    # TODO can each thread only support one run?
+ # test listing runs + @agentClientPreparer() + @recorded_by_proxy + def test_list_runs(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check list for current runs + runs0 = client.agents.list_runs(thread_id=thread.id) + assert runs0.data.__len__() == 0 + + # create run and check list + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + runs1 = client.agents.list_runs(thread_id=thread.id) + assert runs1.data.__len__() == 1 + assert runs1.data[0].id == run.id + + # create second run + run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run2.id + print("Created run, run ID", run2.id) + runs2 = client.agents.list_runs(thread_id=thread.id) + assert runs2.data.__len__() == 2 + assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + ''' + + ''' + # TODO figure out what to update the run with + # test updating run + @agentClientPreparer() + @recorded_by_proxy + def test_update_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + body = json.dumps({'todo': 'placeholder'}) + client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + ''' + + # test submitting tool outputs to run + @agentClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # Initialize agent tools + functions = FunctionTool(user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset) + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert 
run.tools
+        assert run.tools[0]['function']['name'] == functions.definitions[0]['function']['name']
+        print("Tool successfully submitted:", functions.definitions[0]['function']['name'])
+
+        # check status
+        assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            # check if tools are needed
+            if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+                print("Requires action: submit tool outputs")
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    print("No tool calls provided - cancelling run")  # TODO how can I make sure that it wants tools? should I have some kind of error message?
+                    client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                # submit tool outputs to run
+                tool_outputs = toolset.execute_tool_calls(tool_calls)  # TODO issue somewhere here
+                print("Tool outputs:", tool_outputs)
+                if tool_outputs:
+                    client.agents.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print("Current run status:", run.status)
+
+        print("Run completed with status:", run.status)
+
+
+        # check that messages used the tool
+        # (%H is the 24-hour clock and %I the 12-hour clock, so check for either format)
+        messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id)
+        tool_message = messages['data'][0]['content'][0]['text']['value']
+        hour24 = time.strftime("%H")
+        hour12 = time.strftime("%I")
+        minute = time.strftime("%M")
+        assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute in tool_message
+        print("Used tool_outputs")
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    """
+    # DISABLED: rewrite to ensure run is not complete when cancel_run is called
+    # test cancelling run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_cancel_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AzureAIClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status and cancel
+        assert run.status in ["queued", "in_progress", "requires_action"]
+        client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+
+        while run.status in ["queued", "cancelling"]:
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Current run status:", run.status)
+        assert run.status == "cancelled"
+        print("Run cancelled")
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    # test create thread and run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_thread_and_run(self, **kwargs):
+        time.sleep(26)
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, 
AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread and run + run = client.agents.create_thread_and_run(assistant_id=agent.id) + assert run.id + assert run.thread_id + print("Created run, run ID", run.id) + + # get thread + thread = client.agents.get_thread(run.thread_id) + assert thread.id + print("Created thread, thread ID", thread.id) + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + # assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing run steps + @agentClientPreparer() + @recorded_by_proxy + def test_list_run_step(self, **kwargs): + + time.sleep(50) + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? + # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps['data'].__len__() > 0 # TODO what else should we look at? 
+ + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting run step + # TODO where are step ids from + @agentClientPreparer() + @recorded_by_proxy + def test_get_run_step(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AzureAIClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, can you tell me a joke?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + if (run.status == "failed"): + assert run.last_error + print(run.last_error) + print("FAILED HERE") + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + if (run.status == "failed"): + assert run.last_error + print(run.last_error) + print("FAILED HERE") + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + # list steps, check that get_run_step works with first step_id + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps['data'].__len__() > 0 + step = steps['data'][0] + get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + assert step == get_step + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Streaming APIs + # # + # # ********************************************************************************** + + + # # ********************************************************************************** + # # + # # NEGATIVE TESTS - TODO idk what goes here + # # + # # ********************************************************************************** + + """ + # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors + # test agent creation and deletion + @agentClientPreparer() + @recorded_by_proxy + def test_negative_create_delete_agent(self, **kwargs): + # create client using bad endpoint + bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm" + + credential = self.get_credential(AzureAIClient, is_async=False) + client = AzureAIClient.from_connection_string( + credential=credential, + connection=bad_connection_string, + ) + + # attempt to create agent with bad client + exception_caught = False + try: + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + # check for error (will not have a status code since it failed on request -- no response was recieved) + except (ServiceRequestError, 
HttpResponseError) as e: + exception_caught = True + if type(e) == ServiceRequestError: + assert e.message + assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower() + else: + assert "No such host is known" and "foo.bar.some-domain.ms" in str(e) + + # close client and confirm an exception was caught + client.close() + assert exception_caught + """ + diff --git a/sdk/ai/azure-ai-client/tests/conftest.py b/sdk/ai/azure-ai-client/tests/conftest.py new file mode 100644 index 000000000000..6a5c0fe7070b --- /dev/null +++ b/sdk/ai/azure-ai-client/tests/conftest.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import pytest +from devtools_testutils import test_proxy, remove_batch_sanitizers + +# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method +@pytest.fixture(scope="session", autouse=True) +def start_proxy(test_proxy): + return + + +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: + # - AZSDK3493: $..name + remove_batch_sanitizers(["AZSDK3493"]) + \ No newline at end of file From 1eea30459c1464cb54d54f4153d8b8634d68bb4f Mon Sep 17 00:00:00 2001 From: howieleung Date: Fri, 11 Oct 2024 09:57:46 -0700 Subject: [PATCH 024/138] Took out streaming from create_run, create_and_process_run, and submit_tool_outputs_to_run and declare separate functions for streaming (#37851) * Took out streaming from create_run, create_and_process_run, and submit_tool_outputs_to_run and declare separate functions for streaming * Took out create_and_process_stream and update samples accordingly * fixed samples --- .../azure/ai/client/aio/operations/_patch.py | 555 ++++++++++++++---- .../azure/ai/client/operations/_patch.py | 465 +++++++++++++-- ...sample_agents_stream_eventhandler_async.py | 3 +- ..._stream_eventhandler_with_toolset_async.py | 6 +- .../sample_agents_stream_iteration_async.py | 2 +- ...mple_agents_code_interpreter_attachment.py | 2 +- .../sample_agents_stream_eventhandler.py | 4 +- ...agents_stream_eventhandler_with_toolset.py | 6 +- .../agents/sample_agents_stream_iteration.py | 2 +- ...le_agents_stream_iteration_with_toolset.py | 5 +- 10 files changed, 876 insertions(+), 174 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 66c3c87bc38a..83bdd5c11161 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -435,7 +435,7 @@ def get_toolset(self) -> Optional[_models.ToolSet]: @overload async def create_run( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. 
@@ -462,7 +462,6 @@ async def create_run( additional_instructions: Optional[str] = None, additional_messages: Optional[List[_models.ThreadMessage]] = None, tools: Optional[List[_models.ToolDefinition]] = None, - stream: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, @@ -471,9 +470,9 @@ async def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -483,11 +482,11 @@ async def create_run( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -499,11 +498,6 @@ async def create_run( :keyword tools: The overridden list of enabled tools that the agent should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream: bool :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -536,15 +530,13 @@ async def create_run( :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or ~azure.ai.client.models.AgentsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or - ~azure.ai.client.models.AgentsApiResponseFormatMode or - ~azure.ai.client.models.AgentsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -552,7 +544,7 @@ async def create_run( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -561,7 +553,7 @@ async def create_run( @overload async def create_run( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -588,7 +580,6 @@ async def create_run( additional_instructions: Optional[str] = None, additional_messages: Optional[List[_models.ThreadMessage]] = None, tools: Optional[List[_models.ToolDefinition]] = None, - stream: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, @@ -597,9 +588,9 @@ async def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -608,11 +599,11 @@ async def create_run( :type body: JSON or IO[bytes] :keyword assistant_id: The ID of the agent that should run the thread. Required. :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. :paramtype instructions: str :keyword additional_instructions: Additional instructions to append at the end of the instructions for the run. This is useful for modifying the behavior @@ -624,11 +615,6 @@ async def create_run( :keyword tools: The overridden list of enabled tools that the agent should use to run the thread. Default value is None. 
:paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream: bool :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -661,15 +647,13 @@ async def create_run( :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or ~azure.ai.client.models.AgentsNamedToolChoice :keyword response_format: Specifies the format that the model must output. Is one of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.client.models.AgentsApiResponseFormatMode or - ~azure.ai.client.models.AgentsApiResponseFormat + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used for storing additional information about that object in a structured format. Keys may be up to 64 characters in length and values may be up to 512 characters in length. Default value is @@ -677,7 +661,7 @@ async def create_run( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -696,8 +680,8 @@ async def create_run( additional_instructions=additional_instructions, additional_messages=additional_messages, tools=tools, - stream_parameter=stream, - stream=stream, + stream_parameter=False, + stream=False, temperature=temperature, top_p=top_p, max_prompt_tokens=max_prompt_tokens, @@ -706,7 +690,7 @@ async def create_run( tool_choice=tool_choice, response_format=response_format, metadata=metadata, - **kwargs + **kwargs, ) elif isinstance(body, io.IOBase): # Handle overload with binary body. 
@@ -717,10 +701,7 @@ async def create_run(
             raise ValueError("Invalid combination of arguments provided.")
 
         # If streaming is enabled, return the custom stream object
-        if stream:
-            return _models.AsyncAgentRunStream(await response, event_handler)
-        else:
-            return await response
+        return await response
 
     @distributed_trace_async
     async def create_and_process_run(
@@ -732,7 +713,6 @@ async def create_and_process_run(
         additional_instructions: Optional[str] = None,
         additional_messages: Optional[List[_models.ThreadMessage]] = None,
         tools: Optional[List[_models.ToolDefinition]] = None,
-        stream: Optional[bool] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
         max_prompt_tokens: Optional[int] = None,
@@ -741,10 +721,10 @@ async def create_and_process_run(
         tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
         metadata: Optional[Dict[str, str]] = None,
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
         sleep_interval: int = 1,
-        **kwargs: Any
-    ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]:
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
         """Creates a new run for an agent thread and processes the run.
 
         :param thread_id: Required.
@@ -767,11 +747,6 @@ async def create_and_process_run(
         :keyword tools: The overridden list of enabled tools that the agent should use to run the
          thread. Default value is None.
         :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
-        :keyword stream: If ``true``\\ , returns a stream of events that happen during the
-         Run as server-sent events,
-         terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
-         value is None.
-        :paramtype stream: bool
         :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
          will make the output more random, while lower values like 0.2 will make it more focused and
          deterministic. Default
@@ -820,13 +795,12 @@ async def create_and_process_run(
         :paramtype metadata: dict[str, str]
         :keyword event_handler: The event handler to use for processing events during the run. Default value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
         :keyword sleep_interval: The time in seconds to wait between polling the service for run
          status. Default value is 1.
-        :paramtype sleep_interval: int
-        :return: str or AsyncAgentRunStream. The run completion status if streaming is disabled, otherwise
-            the AsyncAgentRunStream object.
-        :rtype: str or ~azure.ai.client.models.AsyncAgentRunStream
+        :paramtype sleep_interval: int
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.ThreadRun
         :raises ~azure.core.exceptions.HttpResponseError:
         """
         # Create and initiate the run with additional parameters
         run = await self.create_run(
@@ -838,7 +812,6 @@ async def create_and_process_run(
             additional_instructions=additional_instructions,
             additional_messages=additional_messages,
             tools=tools,
-            stream=stream,
             temperature=temperature,
             top_p=top_p,
             max_prompt_tokens=max_prompt_tokens,
@@ -848,13 +821,9 @@ async def create_and_process_run(
             response_format=response_format,
             metadata=metadata,
             event_handler=event_handler,
-            **kwargs
+            **kwargs,
         )
-
-        # Return the run stream object if streaming is enabled
-        if stream:
-            return run
-
+
         # Monitor and process the run status
         while run.status in ["queued", "in_progress", "requires_action"]:
             time.sleep(sleep_interval)
@@ -863,8 +832,8 @@ async def create_and_process_run(
             if run.status == "requires_action" and run.required_action.submit_tool_outputs:
                 tool_calls = run.required_action.submit_tool_outputs.tool_calls
                 if not tool_calls:
-                    logger.warning("No tool calls provided - cancelling run")
+                    logging.warning("No tool calls provided - cancelling run")
                     await self.cancel_run(thread_id=thread_id, run_id=run.id)
                     break
 
                 toolset = self.get_toolset()
@@ -872,19 +841,289 @@ async def create_and_process_run(
                 tool_outputs = await toolset.execute_tool_calls(tool_calls)
             else:
                 raise ValueError("Toolset is not available in the client.")
-
-            logger.info("Tool outputs: %s", tool_outputs)
+
+            logging.info("Tool outputs: %s", tool_outputs)
             if tool_outputs:
                 await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
 
-            logger.info("Current run status: %s", run.status)
+            logging.info("Current run status: %s", run.status)
 
         return run
 
+    @overload
+    async def create_stream(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AsyncAgentRunStream:
+        """Creates a new stream for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.client.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_stream( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AsyncAgentRunStream: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. 
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context windows
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.client.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat Default value is None.
+        :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
+         or ~azure.ai.client.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AsyncAgentRunStream:
+        """Creates a new run for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAgentRunStream:
+        """Creates a new run for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+ :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler + :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.client.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. 
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return _models.AsyncAgentRunStream(await response, event_handler) + @overload async def submit_tool_outputs_to_run( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -911,10 +1150,9 @@ async def submit_tool_outputs_to_run( *, tool_outputs: List[_models.ToolOutput], content_type: str = "application/json", - stream: Optional[bool] = None, event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + **kwargs: Any, + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -928,11 +1166,9 @@ async def submit_tool_outputs_to_run( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword stream: Default value is None. - :paramtype stream: bool :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -941,7 +1177,7 @@ async def submit_tool_outputs_to_run( @overload async def submit_tool_outputs_to_run( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -968,10 +1204,9 @@ async def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - stream: Optional[bool] = None, event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AsyncAgentRunStream]: + **kwargs: Any, + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -982,42 +1217,158 @@ async def submit_tool_outputs_to_run( :type run_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :param tool_outputs: List of tool outputs to submit. - :param stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. + :keyword tool_outputs: Required. 
+        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
         :param event_handler: The event handler to use for processing events during the run.
         :param kwargs: Additional parameters.
         :return: ThreadRun. The ThreadRun is compatible with MutableMapping
-        :raises: HttpResponseError for HTTP errors.
+        :rtype: ~azure.ai.client.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
         """
 
         if isinstance(body, dict):
             content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, body, content_type=content_type, **kwargs
-            )
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
 
         elif tool_outputs is not _Unset:
             response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=stream, stream=stream, **kwargs
+                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
             )
 
         elif isinstance(body, io.IOBase):
             content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, body, content_type=content_type, **kwargs
-            )
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
 
         else:
             raise ValueError("Invalid combination of arguments provided.")
-
+
         # If streaming is enabled, return the custom stream object
-        if stream:
-            return _models.AsyncAgentRunStream(await response, event_handler)
+        return await response
+
+    @overload
+    async def submit_tool_outputs_to_stream(
+        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AsyncAgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def submit_tool_outputs_to_stream(
+        self,
+        thread_id: str,
+        run_id: str,
+        *,
+        tool_outputs: List[_models.ToolOutput],
+        content_type: str = "application/json",
+        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def submit_tool_outputs_to_stream(
+        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AsyncAgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def submit_tool_outputs_to_stream(
+        self,
+        thread_id: str,
+        run_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        tool_outputs: List[_models.ToolOutput] = _Unset,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
+        :param event_handler: The event handler to use for processing events during the run.
+        :param kwargs: Additional parameters.
+        :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.client.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + else: - return await response + raise ValueError("Invalid combination of arguments provided.") + + return _models.AsyncAgentRunStream(await response, event_handler) + @overload async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index ae2ffb5bbe56..dd351ad9b3a8 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -453,7 +453,7 @@ def get_toolset(self) -> Optional[_models.ToolSet]: @overload def create_run( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -480,7 +480,6 @@ def create_run( additional_instructions: Optional[str] = None, additional_messages: Optional[List[_models.ThreadMessage]] = None, tools: Optional[List[_models.ToolDefinition]] = None, - stream: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, @@ -491,7 +490,7 @@ def create_run( metadata: Optional[Dict[str, str]] = None, event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -517,11 +516,6 @@ def create_run( :keyword tools: The overridden list of enabled tools that the agent should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream: bool :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -577,7 +571,7 @@ def create_run( @overload def create_run( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. 
@@ -604,7 +598,6 @@ def create_run( additional_instructions: Optional[str] = None, additional_messages: Optional[List[_models.ThreadMessage]] = None, tools: Optional[List[_models.ToolDefinition]] = None, - stream: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, @@ -615,7 +608,7 @@ def create_run( metadata: Optional[Dict[str, str]] = None, event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread. :param thread_id: Required. @@ -640,11 +633,6 @@ def create_run( :keyword tools: The overridden list of enabled tools that the agent should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream: bool :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -710,8 +698,8 @@ def create_run( additional_instructions=additional_instructions, additional_messages=additional_messages, tools=tools, - stream_parameter=stream, - stream=stream, + stream_parameter=False, + stream=False, temperature=temperature, top_p=top_p, max_prompt_tokens=max_prompt_tokens, @@ -731,10 +719,7 @@ def create_run( raise ValueError("Invalid combination of arguments provided.") # If streaming is enabled, return the custom stream object - if stream: - return _models.AgentRunStream(response, event_handler) - else: - return response + return response @distributed_trace def create_and_process_run( @@ -746,7 +731,6 @@ def create_and_process_run( additional_instructions: Optional[str] = None, additional_messages: Optional[List[_models.ThreadMessage]] = None, tools: Optional[List[_models.ToolDefinition]] = None, - stream: Optional[bool] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, max_prompt_tokens: Optional[int] = None, @@ -758,7 +742,7 @@ def create_and_process_run( event_handler: Optional[_models.AgentEventHandler] = None, sleep_interval: int = 1, **kwargs: Any, - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Creates a new run for an agent thread and processes the run. :param thread_id: Required. @@ -781,11 +765,6 @@ def create_and_process_run( :keyword tools: The overridden list of enabled tools that the agent should use to run the thread. Default value is None. :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream: bool :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Default @@ -838,9 +817,8 @@ def create_and_process_run( :keyword sleep_interval: The time in seconds to wait between polling the service for run status. Default value is 1. 
         :paramtype sleep_interval: int
-        :return: str or AgentRunStream. The run completion status if streaming is disabled, otherwise
-            the AgentRunStream object.
-        :rtype: str or ~azure.ai.client.models.AgentRunStream
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.client.models.ThreadRun
         :raises ~azure.core.exceptions.HttpResponseError:
         """
         # Create and initiate the run with additional parameters
@@ -852,7 +830,6 @@ def create_and_process_run(
             additional_instructions=additional_instructions,
             additional_messages=additional_messages,
             tools=tools,
-            stream=stream,
             temperature=temperature,
             top_p=top_p,
             max_prompt_tokens=max_prompt_tokens,
@@ -864,11 +841,7 @@ def create_and_process_run(
             event_handler=event_handler,
             **kwargs,
         )
-
-        # Return the run stream object if streaming is enabled
-        if stream:
-            return run
-
+
         # Monitor and process the run status
         while run.status in ["queued", "in_progress", "requires_action"]:
             time.sleep(sleep_interval)
@@ -895,10 +868,281 @@ def create_and_process_run(
 
         return run
 
+    @overload
+    def create_stream(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Creates a new stream for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run.
This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.client.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. 
+        :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Creates a new run for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.client.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Creates a new run for an agent thread. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.client.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.client.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. 
+ response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + return _models.AgentRunStream(response, event_handler) + + @overload def submit_tool_outputs_to_run( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -925,10 +1169,9 @@ def submit_tool_outputs_to_run( *, tool_outputs: List[_models.ToolOutput], content_type: str = "application/json", - stream: Optional[bool] = None, event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -942,8 +1185,6 @@ def submit_tool_outputs_to_run( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :keyword stream: Default value is None. - :paramtype stream: bool :keyword event_handler: The event handler to use for processing events during the run. Default value is None. :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler @@ -955,7 +1196,7 @@ def submit_tool_outputs_to_run( @overload def submit_tool_outputs_to_run( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -982,10 +1223,9 @@ def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - stream: Optional[bool] = None, event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -998,10 +1238,6 @@ def submit_tool_outputs_to_run( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. 
:paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :param stream: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. :param event_handler: The event handler to use for processing events during the run. :param kwargs: Additional parameters. :return: ThreadRun. The ThreadRun is compatible with MutableMapping @@ -1015,7 +1251,7 @@ def submit_tool_outputs_to_run( elif tool_outputs is not _Unset: response = super().submit_tool_outputs_to_run( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=stream, stream=stream, **kwargs + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs ) elif isinstance(body, io.IOBase): @@ -1026,10 +1262,130 @@ def submit_tool_outputs_to_run( raise ValueError("Invalid combination of arguments provided.") - # If streaming is enabled, return the custom stream object - if stream: - return _models.AgentRunStream(response, event_handler) + return response + + @overload + def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.client.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
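A minimal sketch of the non-streaming call after this split, assuming an existing ai_client and a run currently in the 'requires_action' state, with tool_outputs built as in the toolset samples further below; submit_tool_outputs_to_run now always returns a plain ThreadRun:

    run = ai_client.agents.submit_tool_outputs_to_run(
        thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs
    )
    print(run.status)  # a ThreadRun; streaming now goes through submit_tool_outputs_to_stream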
+ :rtype: ~azure.ai.client.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.client.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
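The streaming counterpart, again as a minimal sketch under the same assumptions (handler is any AgentEventHandler subclass, as in the samples below):

    with ai_client.agents.submit_tool_outputs_to_stream(
        thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=handler
    ) as stream:
        stream.until_done()  # consume server-sent events until the run reaches a terminal state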
+ :rtype: ~azure.ai.client.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + else: - return response + raise ValueError("Invalid combination of arguments provided.") + + return _models.AgentRunStream(response, event_handler) @overload def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index eb0b65bb36f7..63476c3cacd1 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -92,10 +92,9 @@ async def main(): message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") - async with await ai_client.agents.create_and_process_run( + async with await ai_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, - stream=True, event_handler=MyEventHandler() ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 86c7ec596e09..777331e2afd0 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -89,11 +89,10 @@ async def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: print(f"Tool outputs: {tool_outputs}") if tool_outputs: - async with await self._agents.submit_tool_outputs_to_run( + async with await self._agents.submit_tool_outputs_to_stream( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, - stream=True, event_handler=self ) as stream: await stream.until_done() @@ -141,10 +140,9 @@ async def main(): message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details") print(f"Created message, message ID {message.id}") - async with await ai_client.agents.create_and_process_run( + async with await ai_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, - stream=True, event_handler=MyEventHandler(ai_client.agents) ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index 69f50d7cd009..fc6cc13efa93 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -65,7 +65,7 @@ async def main(): message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") - async with await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: + async with await ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: async for event_type, event_data in stream: if isinstance(event_data, MessageDeltaChunk): diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index 7cfd3d89f47a..fda7c863fc56 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -57,7 +57,7 @@ file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4) print(f"Uploaded file, file ID: {file.id}") - code_interpreter = CodeInterpreterTool + code_interpreter = CodeInterpreterTool() code_interpreter.add_file(file.id) # note that the CodeInterpreterToolDefinition tool must be added, or the assistant will be unable to view the file diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py index e11d1628bc4e..72f6eafedb3f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ -99,8 +99,8 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") - with ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id, stream=True, event_handler=MyEventHandler() + with ai_client.agents.create_stream( + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index b9274118f35b..52331386e780 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -112,11 +112,10 @@ def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: print(f"Tool outputs:
{tool_outputs}") if tool_outputs: - with self._agents.submit_tool_outputs_to_run( + with self._agents.submit_tool_outputs_to_stream( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, - stream=True, event_handler=self ) as stream: stream.until_done() @@ -139,10 +138,9 @@ def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details") print(f"Created message, message ID {message.id}") - with ai_client.agents.create_and_process_run( + with ai_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, - stream=True, event_handler=MyEventHandler(ai_client.agents) ) as stream: stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py index 416c2dfe3c31..3fba6a251eda 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py @@ -70,7 +70,7 @@ message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") - with ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: + with ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: for event_type, event_data in stream: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index 6033779f5184..880420249ae2 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -57,11 +57,10 @@ # Function to handle tool stream iteration def handle_submit_tool_outputs(operatiions: AgentsOperations, thread_id, run_id, tool_outputs): try: - with operatiions.submit_tool_outputs_to_run( + with operatiions.submit_tool_outputs_to_stream( thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, - stream=True ) as tool_stream: for tool_event_type, tool_event_data in tool_stream: if tool_event_type == AgentStreamEvent.ERROR: @@ -101,7 +100,7 @@ def handle_message_delta(delta: MessageDeltaChunk) -> None: message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") print(f"Created message, message ID {message.id}") - with ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, stream=True) as stream: + with ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: for event_type, event_data in stream: From e2040020f224957505f04de84c01229b3e5c1042 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 11 Oct 2024 10:13:32 -0700 Subject: [PATCH 025/138] Miscellaneous updates (#37852) --- .../azure/ai/client/_client.py | 12 +- .../azure/ai/client/_configuration.py | 12 +- .../azure-ai-client/azure/ai/client/_patch.py | 33 ++-- .../azure/ai/client/aio/_client.py | 12 +- .../azure/ai/client/aio/_configuration.py | 12 +- .../azure/ai/client/aio/_patch.py | 33 ++-- .../ai/client/aio/operations/_operations.py | 120 +++++++-------- 
.../azure/ai/client/aio/operations/_patch.py | 17 ++- .../azure/ai/client/models/__init__.py | 4 + .../azure/ai/client/models/_enums.py | 8 +- .../azure/ai/client/models/_models.py | 61 ++++---- .../azure/ai/client/models/_patch.py | 6 +- .../azure/ai/client/operations/_operations.py | 120 +++++++-------- .../azure/ai/client/operations/_patch.py | 14 +- .../sample_agents_basics_async.py | 4 +- .../sample_agents_functions_async.py | 4 +- ...sample_agents_stream_eventhandler_async.py | 4 +- ..._stream_eventhandler_with_toolset_async.py | 4 +- .../sample_agents_stream_iteration_async.py | 4 +- ...gents_with_file_search_attachment_async.py | 4 +- .../samples/agents/sample_agents_basics.py | 16 +- ...mple_agents_code_interpreter_attachment.py | 4 +- .../agents/sample_agents_file_search.py | 4 +- .../samples/agents/sample_agents_functions.py | 16 +- .../agents/sample_agents_run_with_toolset.py | 16 +- .../sample_agents_stream_eventhandler.py | 16 +- ...agents_stream_eventhandler_with_toolset.py | 4 +- .../agents/sample_agents_stream_iteration.py | 16 +- ...le_agents_stream_iteration_with_toolset.py | 4 +- ...mple_agents_with_file_search_attachment.py | 4 +- .../async_samples/sample_endpoints_async.py | 144 ++++++++---------- .../samples/endpoints/sample_endpoints.py | 106 +++++++------ .../samples/evaluations/sample_evaluations.py | 2 +- .../sample_get_azure_openai_client_async.py | 16 +- ...ample_get_chat_completions_client_async.py | 15 +- .../sample_get_embeddings_client_async.py | 18 +-- .../sample_get_azure_openai_client.py | 47 +++--- .../sample_get_chat_completions_client.py | 25 +-- .../inference/sample_get_embeddings_client.py | 41 ++--- .../tests/endpoints/unit_tests.py | 4 +- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 41 files changed, 415 insertions(+), 593 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index bd5efc4521b1..39545e19ad8d 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -41,8 +41,8 @@ class AzureAIClient: :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the Azure AI Studio hub. Required. - :type workspace_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. 
Default value is @@ -56,16 +56,16 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "TokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" + _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{projectName}" self._config = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, **kwargs ) @@ -120,7 +120,7 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py index ee10b245b611..dde212466722 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py @@ -32,8 +32,8 @@ class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attribute :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the Azure AI Studio hub. Required. - :type workspace_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. 
Default value is @@ -47,7 +47,7 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "TokenCredential", **kwargs: Any ) -> None: @@ -59,15 +59,15 @@ def __init__( raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: raise ValueError("Parameter 'resource_group_name' must not be None.") - if workspace_name is None: - raise ValueError("Parameter 'workspace_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint self.subscription_id = subscription_id self.resource_group_name = resource_group_name - self.workspace_name = workspace_name + self.project_name = project_name self.credential = credential self.api_version = api_version self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 1ec9faeef28e..1e2704985699 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -25,7 +25,7 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "TokenCredential", **kwargs: Any, ) -> None: @@ -36,8 +36,8 @@ def __init__( raise ValueError("subscription_id ID is required") if not resource_group_name: raise ValueError("resource_group_name is required") - if not workspace_name: - raise ValueError("workspace_name is required") + if not project_name: + raise ValueError("project_name is required") if not credential: raise ValueError("Credential is required") if "api_version" in kwargs: @@ -50,12 +50,12 @@ def __init__( kwargs3 = kwargs.copy() # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config1 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, api_version="2024-07-01-preview", credential_scopes=["https://management.azure.com"], @@ -81,12 +81,12 @@ def __init__( self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config2 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, 
api_version="2024-07-01-preview", # TODO: Update me credential_scopes=["https://ml.azure.com"], @@ -112,12 +112,12 @@ def __init__( self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config3 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready @@ -167,24 +167,23 @@ def __exit__(self, *exc_details: Any) -> None: self._client2.__exit__(*exc_details) self._client3.__exit__(*exc_details) - @classmethod - def from_connection_string(cls, connection: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": + def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": """ Create an AzureAIClient from a connection string. - :param connection: The connection string, copied from your AI Studio project. + :param conn_str: The connection string, copied from your AI Studio project. """ - if not connection: + if not conn_str: raise ValueError("Connection string is required") - parts = connection.split(";") + parts = conn_str.split(";") if len(parts) != 4: raise ValueError("Invalid connection string format") - endpoint = parts[0] + endpoint = "https://" + parts[0] subscription_id = parts[1] resource_group_name = parts[2] - workspace_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, workspace_name, credential, **kwargs) + project_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) __all__: List[str] = [ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 53eb218b6823..0e194be24fc6 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -41,8 +41,8 @@ class AzureAIClient: :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the Azure AI Studio hub. Required. - :type workspace_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. 
Default value is @@ -56,16 +56,16 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "AsyncTokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{workspaceName}" + _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{projectName}" self._config = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, **kwargs ) @@ -122,7 +122,7 @@ def send_request( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py index eb7879780472..8356c07ba7ee 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py @@ -32,8 +32,8 @@ class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attribute :type subscription_id: str :param resource_group_name: The name of the Azure Resource Group. Required. :type resource_group_name: str - :param workspace_name: The name of the Azure AI Studio hub. Required. - :type workspace_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. 
Default value is @@ -47,7 +47,7 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "AsyncTokenCredential", **kwargs: Any ) -> None: @@ -59,15 +59,15 @@ def __init__( raise ValueError("Parameter 'subscription_id' must not be None.") if resource_group_name is None: raise ValueError("Parameter 'resource_group_name' must not be None.") - if workspace_name is None: - raise ValueError("Parameter 'workspace_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") if credential is None: raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint self.subscription_id = subscription_id self.resource_group_name = resource_group_name - self.workspace_name = workspace_name + self.project_name = project_name self.credential = credential self.api_version = api_version self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py index 1201648eb3f9..06729aba0632 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py @@ -25,7 +25,7 @@ def __init__( endpoint: str, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, credential: "AsyncTokenCredential", **kwargs: Any, ) -> None: @@ -36,8 +36,8 @@ def __init__( raise ValueError("subscription_id ID is required") if not resource_group_name: raise ValueError("resource_group_name is required") - if not workspace_name: - raise ValueError("workspace_name is required") + if not project_name: + raise ValueError("project_name is required") if not credential: raise ValueError("Credential is required") if "api_version" in kwargs: @@ -50,12 +50,12 @@ def __init__( kwargs3 = kwargs.copy() # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config1 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, api_version="2024-07-01-preview", credential_scopes=["https://management.azure.com"], @@ -81,12 +81,12 @@ def __init__( self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config2 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + 
project_name=project_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me credential_scopes=["https://ml.azure.com"], @@ -112,12 +112,12 @@ def __init__( self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" # pylint: disable=line-too-long + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config3 = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, - workspace_name=workspace_name, + project_name=project_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me credential_scopes=["https://management.azure.com"], # TODO: Update once service changes are ready @@ -168,22 +168,23 @@ async def __aexit__(self, *exc_details: Any) -> None: await self._client3.__aexit__(*exc_details) @classmethod - def from_connection_string(cls, connection: str, credential: "AzureTokenCredential", **kwargs) -> "AzureAIClient": + def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AzureAIClient": """ Create an asynchronous AzureAIClient from a connection string. - :param connection: The connection string, copied from your AI Studio project. + :param conn_str: The connection string, copied from your AI Studio project. """ - if not connection: + if not conn_str: raise ValueError("Connection string is required") - parts = connection.split(";") + parts = conn_str.split(";") if len(parts) != 4: raise ValueError("Invalid connection string format") - endpoint = parts[0] + endpoint = "https://" + parts[0] subscription_id = parts[1] resource_group_name = parts[2] - workspace_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, workspace_name, credential, **kwargs) + project_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) + __all__: List[str] = [ "AzureAIClient", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index f29d76e51ded..5a747e46f845 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -320,7 +320,7 @@ async def create_agent( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -411,7 +411,7 @@ async def list_agents( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, 
**path_format_arguments) @@ -476,7 +476,7 @@ async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -725,7 +725,7 @@ async def update_agent( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -790,7 +790,7 @@ async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentD "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -955,7 +955,7 @@ async def create_thread( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1020,7 +1020,7 @@ async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1188,7 +1188,7 @@ async def update_thread( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1253,7 +1253,7 @@ async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDe "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1449,7 +1449,7 @@ async def create_message( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", 
self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1548,7 +1548,7 @@ async def list_messages( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1616,7 +1616,7 @@ async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _ "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1781,7 +1781,7 @@ async def update_message( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2105,7 +2105,7 @@ async def create_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2200,7 +2200,7 @@ async def list_runs( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2268,7 +2268,7 @@ async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.T "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2433,7 +2433,7 @@ async def update_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } 
_request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2610,7 +2610,7 @@ async def submit_tool_outputs_to_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2678,7 +2678,7 @@ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _model "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2989,7 +2989,7 @@ async def create_thread_and_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3060,7 +3060,7 @@ async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3159,7 +3159,7 @@ async def list_run_steps( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3228,7 +3228,7 @@ async def list_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3354,7 +3354,7 @@ async def upload_file( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3419,7 +3419,7 @@ async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletion "resourceGroupName": self._serialize.url( 
"self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3484,7 +3484,7 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3549,7 +3549,7 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileCon "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3640,7 +3640,7 @@ async def list_vector_stores( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3817,7 +3817,7 @@ async def create_vector_store( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3882,7 +3882,7 @@ async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4048,7 +4048,7 @@ async def modify_vector_store( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4114,7 +4114,7 @@ async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _mod "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": 
self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4214,7 +4214,7 @@ async def list_vector_store_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4372,7 +4372,7 @@ async def create_vector_store_file( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4440,7 +4440,7 @@ async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwar "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4513,7 +4513,7 @@ async def delete_vector_store_file( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4671,7 +4671,7 @@ async def create_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4741,7 +4741,7 @@ async def get_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4812,7 +4812,7 @@ async def cancel_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -4916,7 +4916,7 @@ async def list_vector_store_file_batch_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4996,7 +4996,7 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5139,7 +5139,7 @@ async def _list_secrets( # pylint: disable=protected-access "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5282,7 +5282,7 @@ async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], * "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5359,9 +5359,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5386,9 +5384,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5527,7 +5523,7 @@ async def update( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5592,7 +5588,7 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: 
"resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5717,7 +5713,7 @@ async def create_schedule( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5782,7 +5778,7 @@ async def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedu "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5859,9 +5855,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5886,9 +5880,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5968,9 +5960,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5995,9 +5985,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6062,7 +6050,7 @@ async def delete_schedule(self, id: str, **kwargs: Any) -> None: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", 
self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 83bdd5c11161..2f3fe6fc6154 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -52,6 +52,7 @@ async def get_chat_completions_client(self) -> "ChatCompletionsClient": "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) elif endpoint.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth @@ -122,6 +123,10 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": except ModuleNotFoundError as _: raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai-async'") + # Pick latest GA version from the "Data plane - Inference" row in the table + # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + AZURE_OPENAI_API_VERSION = "2024-06-01" + if endpoint.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" @@ -129,7 +134,7 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": client = AsyncAzureOpenAI( api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? 
+ api_version=AZURE_OPENAI_API_VERSION ) elif endpoint.authentication_type == AuthenticationType.AAD: logger.debug( @@ -147,7 +152,7 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", + api_version=AZURE_OPENAI_API_VERSION, ) elif endpoint.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") @@ -156,7 +161,7 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", + api_version=AZURE_OPENAI_API_VERSION ) else: raise ValueError("Unknown authentication type") @@ -183,19 +188,20 @@ async def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> En connection_name=endpoint_name, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, - workspace_name=self._config.workspace_name, + workspace_name=self._config.project_name, api_version_in_body=self._config.api_version, ) if connection.properties.auth_type == AuthenticationType.AAD: return EndpointProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ...models._patch import SASTokenCredential + token_credential = SASTokenCredential( sas_token=connection.properties.credentials.sas, credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, - workspace_name=self._config.workspace_name, + project_name=self._config.project_name, connection_name=endpoint_name, ) return EndpointProperties(connection=connection, token_credential=token_credential) @@ -1707,4 +1713,3 @@ def patch_sdk(): you can't accomplish using the techniques described in https://aka.ms/azsdk/python/dpcodegen/python/customize """ - diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index 61340ac38b60..ecf84301a862 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -138,7 +138,9 @@ from ._enums import AgentsApiToolChoiceOptionMode from ._enums import AgentsNamedToolChoiceType from ._enums import ApiResponseFormat +from ._enums import AuthenticationType from ._enums import DoneEvent +from ._enums import EndpointType from ._enums import ErrorEvent from ._enums import FilePurpose from ._enums import FileState @@ -302,7 +304,9 @@ "AgentsApiToolChoiceOptionMode", "AgentsNamedToolChoiceType", "ApiResponseFormat", + "AuthenticationType", "DoneEvent", + "EndpointType", "ErrorEvent", "FilePurpose", "FileState", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index a47264fce831..676321444449 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -125,7 +125,7 @@ class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """to do.""" + """Authentication type used by Azure AI service to connect to another service.""" API_KEY = "ApiKey" """API Key 
authentication""" @@ -146,11 +146,9 @@ class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The Type (or category) of the connection.""" AZURE_OPEN_AI = "AzureOpenAI" - """Azure OpenAI""" + """Azure OpenAI service""" SERVERLESS = "Serverless" - """Serverless API""" - AGENT = "Agent" - """Agent""" + """Serverless API service""" class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 9382a7a409c9..1ba759bda534 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -539,7 +539,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles class ConnectionProperties(_model_base.Model): - """to do. + """Connetion properties. You probably want to use the sub-classes and not this class directly. Known sub-classes are: ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth @@ -564,20 +564,19 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): :ivar auth_type: Authentication type of the connection target. Required. Entra ID authentication :vartype auth_type: str or ~azure.ai.client.models.AAD - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", and "Agent". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and + "Serverless". :vartype category: str or ~azure.ai.client.models.EndpointType - :ivar target: to do. Required. + :ivar target: The connection URL to be used for this service. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models._enums.EndpointType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and - \"Agent\".""" + category: Union[str, "_models.EndpointType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" target: str = rest_field() - """to do. Required.""" + """The connection URL to be used for this service. Required.""" class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): @@ -586,24 +585,23 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey :ivar auth_type: Authentication type of the connection target. Required. API Key authentication :vartype auth_type: str or ~azure.ai.client.models.API_KEY - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", and "Agent". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and + "Serverless". :vartype category: str or ~azure.ai.client.models.EndpointType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models._models.CredentialsApiKeyAuth - :ivar target: to do. Required. + :ivar target: The connection URL to be used for this service. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. 
API Key authentication""" - category: Union[str, "_models._enums.EndpointType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and - \"Agent\".""" + category: Union[str, "_models.EndpointType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() - """to do. Required.""" + """The connection URL to be used for this service. Required.""" class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): @@ -613,41 +611,40 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): :ivar auth_type: Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication :vartype auth_type: str or ~azure.ai.client.models.SAS - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", and "Agent". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and + "Serverless". :vartype category: str or ~azure.ai.client.models.EndpointType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models._models.CredentialsSASAuth - :ivar target: to do. Required. + :ivar target: The connection URL to be used for this service. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" - category: Union[str, "_models._enums.EndpointType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", and - \"Agent\".""" + category: Union[str, "_models.EndpointType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() - """to do. Required.""" + """The connection URL to be used for this service. Required.""" class ConnectionsListResponse(_model_base.Model): - """to do. + """Response from the list operation. - :ivar value: to do. Required. + :ivar value: A list of connections, including their secrets where applicable. Required. :vartype value: list[~azure.ai.client.models._models.ConnectionsListSecretsResponse] """ value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field() - """to do. Required.""" + """A list of connections, including their secrets where applicable. Required.""" class ConnectionsListSecretsResponse(_model_base.Model): - """to do. + """Response from the listSecrets operation. :ivar name: The name of the resource. Required. @@ -663,27 +660,27 @@ class ConnectionsListSecretsResponse(_model_base.Model): class CredentialsApiKeyAuth(_model_base.Model): - """to do. + """The credentials needed for API key authentication. - :ivar key: to do. Required. + :ivar key: The API key. Required. :vartype key: str """ key: str = rest_field() - """to do. Required.""" + """The API key. Required.""" class CredentialsSASAuth(_model_base.Model): - """to do. + """The credentials needed for Shared Access Signatures (SAS) authentication. - :ivar sas: to do. Required. + :ivar sas: The Shared Access Signatures (SAS) token. Required. 
:vartype sas: str """ sas: str = rest_field(name="SAS") - """to do. Required.""" + """The Shared Access Signatures (SAS) token. Required.""" class Dataset(InputData, discriminator="dataset"): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 26b197188a38..bd5acd50c2d7 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -81,14 +81,14 @@ def __init__( credential: TokenCredential, subscription_id: str, resource_group_name: str, - workspace_name: str, + project_name: str, connection_name: str, ): self._sas_token = sas_token self._credential = credential self._subscription_id = subscription_id self._resource_group_name = resource_group_name - self._workspace_name = workspace_name + self._project_name = project_name self._connection_name = connection_name self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) @@ -112,7 +112,7 @@ def _refresh_token(self) -> None: endpoint="not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. subscription_id=self._subscription_id, resource_group_name=self._resource_group_name, - workspace_name=self._workspace_name, + project_name=self._project_name, ) connection = ai_client.endpoints.get(endpoint_name=self._connection_name, populate_secrets=True) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index c71ab5334762..2d3adab9ec1f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -1658,7 +1658,7 @@ def create_agent( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1749,7 +1749,7 @@ def list_agents( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -1814,7 +1814,7 @@ def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2063,7 +2063,7 @@ def update_agent( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", 
self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2128,7 +2128,7 @@ def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletio "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2293,7 +2293,7 @@ def create_thread( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2358,7 +2358,7 @@ def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2526,7 +2526,7 @@ def update_thread( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2591,7 +2591,7 @@ def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletion "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2787,7 +2787,7 @@ def create_message( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -2886,7 +2886,7 @@ def list_messages( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ 
-2954,7 +2954,7 @@ def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3119,7 +3119,7 @@ def update_message( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3443,7 +3443,7 @@ def create_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3538,7 +3538,7 @@ def list_runs( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3606,7 +3606,7 @@ def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadR "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3771,7 +3771,7 @@ def update_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -3948,7 +3948,7 @@ def submit_tool_outputs_to_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4016,7 +4016,7 @@ def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.Thre "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": 
self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4327,7 +4327,7 @@ def create_thread_and_run( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4398,7 +4398,7 @@ def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4497,7 +4497,7 @@ def list_run_steps( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4566,7 +4566,7 @@ def list_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4692,7 +4692,7 @@ def upload_file( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4757,7 +4757,7 @@ def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4822,7 +4822,7 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = 
self._client.format_url(_request.url, **path_format_arguments) @@ -4887,7 +4887,7 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentRe "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -4978,7 +4978,7 @@ def list_vector_stores( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5155,7 +5155,7 @@ def create_vector_store( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5220,7 +5220,7 @@ def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Vecto "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5386,7 +5386,7 @@ def modify_vector_store( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5452,7 +5452,7 @@ def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.Ve "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5552,7 +5552,7 @@ def list_vector_store_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5710,7 +5710,7 @@ def create_vector_store_file( "resourceGroupName": self._serialize.url( 
"self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5778,7 +5778,7 @@ def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: An "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -5851,7 +5851,7 @@ def delete_vector_store_file( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6009,7 +6009,7 @@ def create_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6079,7 +6079,7 @@ def get_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6150,7 +6150,7 @@ def cancel_vector_store_file_batch( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6254,7 +6254,7 @@ def list_vector_store_file_batch_files( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6334,7 +6334,7 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + 
"projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6477,7 +6477,7 @@ def _list_secrets( # pylint: disable=protected-access "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6618,7 +6618,7 @@ def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6695,9 +6695,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6722,9 +6720,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6863,7 +6859,7 @@ def update( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -6928,7 +6924,7 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7053,7 +7049,7 @@ def create_schedule( "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7118,7 +7114,7 @@ def 
get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7195,9 +7191,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7222,9 +7216,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7304,9 +7296,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7331,9 +7321,7 @@ def prepare_request(next_link=None): "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url( - "self._config.workspace_name", self._config.workspace_name, "str" - ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) @@ -7398,7 +7386,7 @@ def delete_schedule(self, id: str, **kwargs: Any) -> None: # pylint: disable=in "resourceGroupName": self._serialize.url( "self._config.resource_group_name", self._config.resource_group_name, "str" ), - "workspaceName": self._serialize.url("self._config.workspace_name", self._config.workspace_name, "str"), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), } _request.url = self._client.format_url(_request.url, **path_format_arguments) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index dd351ad9b3a8..b5133b04d620 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -134,6 +134,10 @@ def get_azure_openai_client(self) -> "AzureOpenAI": except ModuleNotFoundError as _: raise ModuleNotFoundError("OpenAI SDK is not installed. 
Please install it using 'pip install openai'") + # Pick latest GA version from the "Data plane - Inference" row in the table + # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + AZURE_OPENAI_API_VERSION = "2024-06-01" + if endpoint.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" @@ -141,7 +145,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": client = AzureOpenAI( api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? + api_version=AZURE_OPENAI_API_VERSION ) elif endpoint.authentication_type == AuthenticationType.AAD: logger.debug( @@ -159,7 +163,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", + api_version=AZURE_OPENAI_API_VERSION ) elif endpoint.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") @@ -168,7 +172,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", + api_version=AZURE_OPENAI_API_VERSION ) else: raise ValueError("Unknown authentication type") @@ -197,7 +201,7 @@ def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> Endpoint connection_name=endpoint_name, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, - workspace_name=self._config.workspace_name, + workspace_name=self._config.project_name, api_version_in_body=self._config.api_version, ) if connection.properties.auth_type == AuthenticationType.AAD: @@ -210,7 +214,7 @@ def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> Endpoint credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, - workspace_name=self._config.workspace_name, + project_name=self._config.project_name, connection_name=endpoint_name, ) return EndpointProperties(connection=connection, token_credential=token_credential) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index 2e89732c94ff..fd80e5077a43 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -34,11 +34,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index 4aeb19e4d894..611da3150afe 100644 --- 
a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -36,11 +36,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 63476c3cacd1..8d11336e1dad 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -61,11 +61,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 777331e2afd0..c6d8cd869b59 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -102,11 +102,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index fc6cc13efa93..6d1cf2fd89cb 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -34,11 +34,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by 
giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 2e5df870ed82..3342d0741305 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -34,11 +34,9 @@ async def main(): # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py index e3df8d7aa06b..f0d4e68d6219 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py @@ -29,24 +29,10 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) -""" with ai_client: agent = ai_client.agents.create_agent( diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index fda7c863fc56..5f36c9de0e9e 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -33,11 +33,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 599d9cb73696..3416906e0dbd 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -36,11 +36,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index c2a9ee348fb6..585874f0bf90 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -31,24 +31,10 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -""" # Initialize function tool with user functions functions = FunctionTool(functions=user_functions) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py index 34ee7dc290f7..75df4ab5f3cb 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py @@ -32,24 +32,10 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) -""" # Initialize agent toolset with user functions and code interpreter functions = FunctionTool(user_functions) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py index 72f6eafedb3f..eeecf71292b5 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ -41,24 +41,10 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) -""" class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 52331386e780..1071cd24aa58 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -40,11 +40,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py index 3fba6a251eda..dc1036017b09 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py @@ -38,24 +38,10 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) -""" with ai_client: # Create an agent and run stream with iteration diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index 880420249ae2..a34fb5c1912e 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -35,11 +35,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index 97de3ba1382e..293d32c539b7 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -33,11 +33,9 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -connection_string = os.environ["AI_CLIENT_CONNECTION_STRING"] - ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=connection_string, + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly diff --git a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py index 2ee646d8eb7e..66cc29821061 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py @@ -4,13 +4,14 @@ # ------------------------------------ """ -FILE: sample_endpoints_async +FILE: sample_endpoints_async.py DESCRIPTION: - This sample demonstrates how to enumerate and get endpoints from an AzureAIClient. + Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate endpoints + and get endpoint properties. 
USAGE: - python sample_endpoints_async + python sample_endpoints_async.py Before running the sample: @@ -24,11 +25,7 @@ import os from azure.ai.client.aio import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType -from openai import AzureOpenAI -from azure.ai.inference.aio import ChatCompletionsClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from azure.core.credentials import AzureKeyCredential +from azure.identity import DefaultAzureCredential async def sample_endpoints_async(): @@ -36,18 +33,9 @@ async def sample_endpoints_async(): # It should be in the format ";;;" async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) as ai_client: - # Or, you can create the Azure AI Client by giving all required parameters directly - # async with AzureAIClient( - # credential=DefaultAzureCredential(), - # endpoint=os.environ["AI_CLIENT_ENDPOINT"], - # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - # ) as ai_client: - # List all endpoints of a particular "type", with or without their credentials: print("====> Listing of all Azure Open AI endpoints:") async for endpoint in ai_client.endpoints.list( @@ -59,83 +47,85 @@ async def sample_endpoints_async(): # Get the default endpoint of a particular "type" (note that since at the moment the service # does not have a notion of a default endpoint, this will return the first endpoint of that type): endpoint = await ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" + endpoint_type=EndpointType.AZURE_OPEN_AI, + populate_secrets=True, # Required. # Optional. Defaults to "False" ) print("====> Get default Azure Open AI endpoint:") print(endpoint) # Get an endpoint by its name: endpoint = await ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. + endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required. ) print("====> Get endpoint by name:") print(endpoint) - exit() - - # Here is how you would create the appropriate AOAI or Inference SDK for these endpoint - if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: - - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? - ) - elif endpoint.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", - ) - elif endpoint.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
- print("====> Creating AzureOpenAI client using SAS authentication") - client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", - ) - - response = client.chat.completions.create( - model="gpt-4o", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], + # Examples of how you would create Inference client + if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: + + from openai import AsyncAzureOpenAI + + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating AzureOpenAI client using API key authentication") + client = AsyncAzureOpenAI( + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, + api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) + elif endpoint.authentication_type == AuthenticationType.AAD: + print("====> Creating AzureOpenAI client using Entra ID authentication") + from azure.identity import get_bearer_token_provider + client = AsyncAzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + ) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") + + response = await client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + print(response.choices[0].message.content) - print(response.choices[0].message.content) + elif endpoint.endpoint_type == EndpointType.SERVERLESS: - elif endpoint.endpoint_type == EndpointType.SERVERLESS: + from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.models import UserMessage - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) - elif endpoint.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
- print("====> Creating ChatCompletionsClient using SAS authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating ChatCompletionsClient using API key authentication") + from azure.core.credentials import AzureKeyCredential + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key) + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + print("====> Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + ) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + await client.close() + print(response.choices[0].message.content) - print(response.choices[0].message.content) async def main(): await sample_endpoints_async() + if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) + diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py index e19cc7191d84..51a49f309f42 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py @@ -3,6 +3,24 @@ # Licensed under the MIT License. # ------------------------------------ +""" +FILE: sample_endpoints.py + +DESCRIPTION: + Given an AzureAIClient, this sample demonstrates how to enumerate endpoints + and get endpoint properties. + +USAGE: + python sample_endpoints.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set the environment variables with your own values: + 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + import os from azure.ai.client import AzureAIClient from azure.ai.client.models import EndpointType, AuthenticationType @@ -14,47 +32,37 @@ # Create an Azure AI Client from a connection string, copied from your AI Studio project. # It should be in the format ";;;" -ai_client = AzureAIClient.from_connection_string( +with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -# ai_client = AzureAIClient( -# credential=DefaultAzureCredential(), -# endpoint=os.environ["AI_CLIENT_ENDPOINT"], -# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], -# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], -# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], -# ) - -# List all endpoints of a particular "type", with or without their credentials: -endpoints = ai_client.endpoints.list( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True, # Optional. 
Defaults to "False" -) -print("====> Listing of all Azure Open AI endpoints:") -for endpoint in endpoints: - print(endpoint) + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], +) as ai_client: -# Get the default endpoint of a particular "type" (note that since at the moment the service -# does not have a notion of a default endpoint, this will return the first endpoint of that type): -endpoint = ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" -) -print("====> Get default Azure Open AI endpoint:") -print(endpoint) + # List all endpoints of a particular "type", with or without their credentials: + endpoints = ai_client.endpoints.list( + endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True, # Optional. Defaults to "False" + ) + print("====> Listing of all Azure Open AI endpoints:") + for endpoint in endpoints: + print(endpoint) -# Get an endpoint by its name: -endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. -) -print("====> Get endpoint by name:") -print(endpoint) + # Get the default endpoint of a particular "type" (note that since at the moment the service + # does not have a notion of a default endpoint, this will return the first endpoint of that type): + endpoint = ai_client.endpoints.get_default( + endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" + ) + print("====> Get default Azure Open AI endpoint:") + print(endpoint) + + # Get an endpoint by its name: + endpoint = ai_client.endpoints.get( + endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required. + ) + print("====> Get endpoint by name:") + print(endpoint) -exit() -# Here is how you would create the appropriate AOAI or Inference SDK for these endpoint +# Examples of how you would create Inference client if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: if endpoint.authentication_type == AuthenticationType.API_KEY: @@ -62,7 +70,7 @@ client = AzureOpenAI( api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", # TODO: Is this needed? + api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) elif endpoint.authentication_type == AuthenticationType.AAD: print("====> Creating AzureOpenAI client using Entra ID authentication") @@ -72,18 +80,10 @@ endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", - ) - elif endpoint.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
- print("====> Creating AzureOpenAI client using SAS authentication") - client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-08-01-preview", + api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") response = client.chat.completions.create( model="gpt-4o", @@ -94,7 +94,7 @@ }, ], ) - + client.close() print(response.choices[0].message.content) elif endpoint.endpoint_type == EndpointType.SERVERLESS: @@ -106,11 +106,9 @@ # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) - elif endpoint.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. - print("====> Creating ChatCompletionsClient using SAS authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - + client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py index cf260c12bcc0..5f30c2717dda 100644 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py @@ -17,7 +17,7 @@ # Create an Azure AI client ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", + conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py index e2d03086acf5..3376675d4b5a 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py @@ -25,24 +25,14 @@ from azure.ai.client.aio import AzureAIClient from azure.identity import DefaultAzureCredential + async def sample_get_azure_openai_client_async(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
- # It should have the format ";;;" async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) as ai_client: - # Or, you can create the Azure AI Client by giving all required parameters directly - # async with AzureAIClient( - # credential=DefaultAzureCredential(), - # endpoint=os.environ["AI_CLIENT_ENDPOINT"], - # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - # ) as ai_client: - # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection: async with await ai_client.inference.get_azure_openai_client() as client: @@ -58,8 +48,10 @@ async def sample_get_azure_openai_client_async(): print(response.choices[0].message.content) + async def main(): await sample_get_azure_openai_client_async() + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py index 27201ddef947..625b4b3fc7b1 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py @@ -26,24 +26,14 @@ from azure.ai.inference.models import UserMessage from azure.identity import DefaultAzureCredential + async def sample_get_chat_completions_client_async(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. - # It should have the format ";;;" async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) as ai_client: - # Or, you can create the Azure AI Client by giving all required parameters directly - # async with AzureAIClient( - # credential=DefaultAzureCredential(), - # endpoint=os.environ["AI_CLIENT_ENDPOINT"], - # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - # ) as ai_client: - # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: async with await ai_client.inference.get_chat_completions_client() as client: @@ -54,5 +44,6 @@ async def sample_get_chat_completions_client_async(): async def main(): await sample_get_chat_completions_client_async() + if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py index 051ba27eb26d..cd6022f4c8a2 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py @@ -25,24 +25,14 @@ from azure.ai.client.aio import AzureAIClient from azure.identity import DefaultAzureCredential + async def sample_get_embeddings_client_async(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
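# (Note the calling pattern used throughout these async inference samples: the
#  get_*_client() helpers are coroutines, and the client they return is itself an
#  async context manager, hence the double construct
#  "async with await ai_client.inference.get_chat_completions_client() as client:".)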
- # It should have the format ";;;" async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) as ai_client: - # Or, you can create the Azure AI Client by giving all required parameters directly - # async with AzureAIClient( - # credential=DefaultAzureCredential(), - # endpoint=os.environ["AI_CLIENT_ENDPOINT"], - # subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - # resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - # workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - # ) as ai_client: - # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: async with await ai_client.inference.get_embeddings_client() as client: @@ -59,5 +49,7 @@ async def sample_get_embeddings_client_async(): async def main(): await sample_get_embeddings_client_async() + if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) + diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py index 73f8c00ccd6e..4d2c0dab80fd 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py @@ -24,33 +24,22 @@ from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# It should have the format ";;;" -ai_client = AzureAIClient.from_connection_string( +with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -# ai_client = AzureAIClient( -# credential=DefaultAzureCredential(), -# endpoint=os.environ["AI_CLIENT_ENDPOINT"], -# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], -# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], -# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], -# ) - -# Get an authenticated OpenAI client for your default Azure OpenAI connection: -client = ai_client.inference.get_azure_openai_client() - -response = client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], -) - -print(response.choices[0].message.content) + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], +) as ai_client: + + # Get an authenticated OpenAI client for your default Azure OpenAI connection: + with ai_client.inference.get_azure_openai_client() as client: + + response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py index 20ac52c8dd0a..af508bd87571 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py @@ -25,25 +25,14 @@ from azure.ai.inference.models import UserMessage from azure.identity import DefaultAzureCredential -# Create an 
Azure AI Client from a connection string, copied from your AI Studio project. -# It should have the format ";;;" -ai_client = AzureAIClient.from_connection_string( +with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], -) + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], +) as ai_client: -# Or, you can create the Azure AI Client by giving all required parameters directly -# ai_client = AzureAIClient( -# credential=DefaultAzureCredential(), -# endpoint=os.environ["AI_CLIENT_ENDPOINT"], -# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], -# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], -# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], -# ) + # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: + with ai_client.inference.get_chat_completions_client() as client: -# Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: -client = ai_client.inference.get_chat_completions_client() + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) -response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - -print(response.choices[0].message.content) + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py index cc6a306685ad..e1a1a22dae82 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py @@ -24,30 +24,19 @@ from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential -# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
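# (The sync samples are being converted to "with ... as ai_client:" blocks so the
#  client's HTTP pipeline is closed deterministically. A sketch of the equivalent
#  explicit form, assuming the standard Azure SDK close() method is available:
#
#      ai_client = AzureAIClient.from_connection_string(
#          credential=DefaultAzureCredential(),
#          conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"],
#      )
#      try:
#          with ai_client.inference.get_embeddings_client() as client:
#              response = client.embed(input=["first phrase"])
#      finally:
#          ai_client.close()
# )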
-# It should have the format ";;;" -ai_client = AzureAIClient.from_connection_string( +with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - connection=os.environ["AI_CLIENT_CONNECTION_STRING"], -) - -# Or, you can create the Azure AI Client by giving all required parameters directly -# ai_client = AzureAIClient( -# credential=DefaultAzureCredential(), -# endpoint=os.environ["AI_CLIENT_ENDPOINT"], -# subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], -# resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], -# workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], -# ) - -# Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: -client = ai_client.inference.get_embeddings_client() - -response = client.embed(input=["first phrase", "second phrase", "third phrase"]) - -for item in response.data: - length = len(item.embedding) - print( - f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " - f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" - ) + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], +) as ai_client: + + # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: + with ai_client.inference.get_embeddings_client() as client: + + response = client.embed(input=["first phrase", "second phrase", "third phrase"]) + + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py index 7bd10564b4d0..341224aae4e5 100644 --- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py +++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py @@ -75,7 +75,7 @@ def test_sas_token_credential_class_mocked(self, **kwargs): credential=FakeTokenCredential(), subscription_id="fake_subscription_id", resource_group_name="fake_resouce_group", - workspace_name="fake_workspace_name", + project_name="fake_project_name", connection_name="fake_connection_name", ) assert sas_token_credential._expires_on == sas_token_expiration @@ -106,7 +106,7 @@ def test_sas_token_credential_class_real(self, **kwargs): credential=None, subscription_id=None, resource_group_name=None, - workspace_name=None, + project_name=None, connection_name=None, ) diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 2746197f6f58..283f9c571164 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 075ca688a475e4fbbbcfc2af8aa18bd6a9ff7680 +commit: 6335d810e99108296eb601a9b7191b08b9368c29 repo: Azure/azure-rest-api-specs additionalDirectories: From d9f5c3a0fb459b3f4f913d19b76512ed63c3c94e Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Mon, 14 Oct 2024 14:06:44 -0800 Subject: [PATCH 026/138] Jhakulin/azure ai client pylance (#37887) * Fixes to Pylance error * fix sample * update * use file_upload_and_poll * updates to async samples * remove comment as we are using poll methods * more fixes --- .../azure/ai/client/aio/operations/_patch.py | 4 +- .../azure/ai/client/models/_patch.py | 192 ++++++++++++------ .../azure/ai/client/operations/_patch.py | 127 +++++------- .../sample_agents_basics_async.py | 1 + 
.../sample_agents_functions_async.py | 21 +- ...sample_agents_stream_eventhandler_async.py | 1 + ..._stream_eventhandler_with_toolset_async.py | 55 +++-- .../sample_agents_stream_iteration_async.py | 1 + ...gents_with_file_search_attachment_async.py | 5 +- ...mple_agents_code_interpreter_attachment.py | 2 +- .../agents/sample_agents_file_search.py | 5 +- .../samples/agents/sample_agents_functions.py | 21 +- ...agents_stream_eventhandler_with_toolset.py | 55 +++-- ...le_agents_stream_iteration_with_toolset.py | 8 +- 14 files changed, 274 insertions(+), 224 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 2f3fe6fc6154..53c4428c3e78 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -894,7 +894,7 @@ async def create_stream( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: """Creates a new run for an agent thread. @@ -1012,7 +1012,7 @@ async def create_stream( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index bd5acd50c2d7..4f7a5c966996 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -11,8 +11,9 @@ import json import logging import base64 +import asyncio -from typing import List +from typing import List, Sequence from azure.core.credentials import TokenCredential, AccessToken from ._enums import AgentStreamEvent @@ -168,7 +169,7 @@ class Tool(ABC): @property @abstractmethod - def definitions(self) -> List[ToolDefinition]: + def definitions(self) -> Sequence[ToolDefinition]: """Get the tool definitions.""" pass @@ -258,7 +259,7 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any: raise @property - def definitions(self) -> List[FunctionToolDefinition]: + def definitions(self) -> Sequence[FunctionToolDefinition]: """ Get the function definitions. @@ -307,7 +308,7 @@ def add_vector_store(self, store_id: str): self.vector_store_ids.append(store_id) @property - def definitions(self) -> List[FileSearchToolDefinition]: + def definitions(self) -> Sequence[FileSearchToolDefinition]: """ Get the file search tool definitions. """ @@ -341,7 +342,7 @@ def add_file(self, file_id: str): self.file_ids.append(file_id) @property - def definitions(self) -> List[CodeInterpreterToolDefinition]: + def definitions(self) -> Sequence[CodeInterpreterToolDefinition]: """ Get the code interpreter tool definitions. 
""" @@ -406,7 +407,7 @@ def remove(self, tool_type: Type[Tool]) -> None: raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") @property - def definitions(self) -> List[ToolDefinition]: + def definitions(self) -> Sequence[ToolDefinition]: """ Get the definitions for all tools in the tool set. """ @@ -416,7 +417,7 @@ def definitions(self) -> List[ToolDefinition]: return tools @property - def resources(self) -> Dict[str, Any]: + def resources(self) -> ToolResources: """ Get the resources for all tools in the tool set. """ @@ -429,7 +430,17 @@ def resources(self) -> Dict[str, Any]: tool_resources[key].update(value) else: tool_resources[key] = value - return tool_resources + return self._create_tool_resources_from_dict(tool_resources) + + def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: + """ + Safely converts a dictionary into a ToolResources instance. + """ + try: + return ToolResources(**resources) + except TypeError as e: + logging.error(f"Error creating ToolResources: {e}") + raise ValueError("Invalid resources for ToolResources.") from e def get_definitions_and_resources(self) -> Dict[str, Any]: """ @@ -589,11 +600,47 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: pass -class BaseAgentRunStream: - def __enter__(self): +class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + event_handler: Optional['AsyncAgentEventHandler'] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + + async def __aenter__(self): return self - def process_event(self, event_data_str: str) -> Tuple[str, Any]: + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + result = close_method() + if asyncio.iscoroutine(result): + await result + + def __aiter__(self): + return self + + async def __anext__(self) -> Tuple[str, Any]: + while True: + try: + chunk = await self.response_iterator.__anext__() + self.buffer += chunk.decode("utf-8") + except StopAsyncIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return await self._process_event(event_data_str) + raise StopAsyncIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return await self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" @@ -613,7 +660,11 @@ def process_event(self, event_data_str: str) -> Tuple[str, Any]: parsed_data = event_data # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + if ( + event_type.startswith("thread.run.step") + and isinstance(parsed_data, dict) + and "expires_at" in parsed_data + ): parsed_data["expired_at"] = parsed_data.pop("expires_at") # Map to the appropriate class instance @@ -654,47 +705,8 @@ def process_event(self, event_data_str: str) -> Tuple[str, Any]: return event_type, event_data_obj - -class AsyncAgentRunStream(BaseAgentRunStream, AsyncIterator[Tuple[str, Any]]): - def __init__( - self, - response_iterator: AsyncIterator[bytes], - event_handler: Optional[AsyncAgentEventHandler] = None, - ): - self.response_iterator = 
response_iterator - self.event_handler = event_handler - self.done = False - self.buffer = "" - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - await close_method() - - def __aiter__(self): - return self - - async def __anext__(self) -> Tuple[str, Any]: - while True: - try: - chunk = await self.response_iterator.__anext__() - self.buffer += chunk.decode("utf-8") - except StopAsyncIteration: - if self.buffer: - event_data_str, self.buffer = self.buffer, "" - if event_data_str: - return await self.process_event(event_data_str) - raise StopAsyncIteration - - while "\n\n" in self.buffer: - event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return await self.process_event(event_data_str) - - async def process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = super().process_event(event_data_str) + async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = self._parse_event_data(event_data_str) if self.event_handler: try: @@ -731,7 +743,7 @@ async def until_done(self) -> None: pass -class AgentRunStream(BaseAgentRunStream, Iterator[Tuple[str, Any]]): +class AgentRunStream(Iterator[Tuple[str, Any]]): def __init__( self, response_iterator: Iterator[bytes], @@ -742,6 +754,9 @@ def __init__( self.done = False self.buffer = "" + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): close_method = getattr(self.response_iterator, "close", None) if callable(close_method): @@ -761,15 +776,76 @@ def __next__(self) -> Tuple[str, Any]: if self.buffer: event_data_str, self.buffer = self.buffer, "" if event_data_str: - return self.process_event(event_data_str) + return self._process_event(event_data_str) raise StopIteration while "\n\n" in self.buffer: event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return self.process_event(event_data_str) + return self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type = None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = ThreadRun(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type in { + AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + 
AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = RunStep(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = ThreadMessage(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = MessageDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = RunStepDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + else: + event_data_obj = parsed_data + + return event_type, event_data_obj - def process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = super().process_event(event_data_str) + def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = self._parse_event_data(event_data_str) if self.event_handler: try: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index b5133b04d620..6fd6436483dc 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -9,7 +9,7 @@ """ import sys, io, logging, os, time from io import IOBase -from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING +from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast # from zoneinfo import ZoneInfo from ._operations import EndpointsOperations as EndpointsOperationsGenerated @@ -338,48 +338,6 @@ def create_agent(self, body: IO[bytes], *, content_type: str = "application/json :raises ~azure.core.exceptions.HttpResponseError: """ - @overload - def create_agent( - self, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with toolset. - - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: A description for the new agent. Default value is None. - :paramtype description: str - :keyword instructions: System instructions for the agent. Default value is None. - :paramtype instructions: str - :keyword toolset: Collection of tools (alternative to `tools` and `tool_resources`). Default - value is None. - :paramtype toolset: ~azure.ai.client.models.ToolSet - :keyword temperature: Sampling temperature for generating agent responses. Default value - is None. - :paramtype temperature: float - :keyword top_p: Nucleus sampling parameter. Default value is None. - :paramtype top_p: float - :keyword response_format: Response format for tool calls. Default value is None. 
- :paramtype response_format: ~azure.ai.client.models.AgentsApiResponseFormatOption - :keyword metadata: Key/value pairs for storing additional information. Default value is None. - :paramtype metadata: dict[str, str] - :return: An Agent object. - :rtype: ~azure.ai.client.models.Agent - :raises: ~azure.core.exceptions.HttpResponseError - """ - @distributed_trace def create_agent( self, @@ -426,7 +384,7 @@ def create_agent( if toolset is not None: self._toolset = toolset - tools = toolset.definitions + tools = list(toolset.definitions) tool_resources = toolset.resources return super().create_agent( @@ -492,7 +450,6 @@ def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.ThreadRun: """Creates a new run for an agent thread. @@ -564,9 +521,6 @@ def create_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -610,7 +564,6 @@ def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.ThreadRun: """Creates a new run for an agent thread. @@ -681,9 +634,6 @@ def create_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -743,7 +693,6 @@ def create_and_process_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, sleep_interval: int = 1, **kwargs: Any, ) -> _models.ThreadRun: @@ -815,9 +764,6 @@ def create_and_process_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :keyword sleep_interval: The time in seconds to wait between polling the service for run status. Default value is 1. 
:paramtype sleep_interval: int @@ -842,7 +788,6 @@ def create_and_process_run( tool_choice=tool_choice, response_format=response_format, metadata=metadata, - event_handler=event_handler, **kwargs, ) @@ -851,7 +796,7 @@ def create_and_process_run( time.sleep(sleep_interval) run = self.get_run(thread_id=thread_id, run_id=run.id) - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: logging.warning("No tool calls provided - cancelling run") @@ -1139,8 +1084,10 @@ def create_stream( else: raise ValueError("Invalid combination of arguments provided.") + + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - return _models.AgentRunStream(response, event_handler) + return _models.AgentRunStream(response_iterator, event_handler) @overload @@ -1390,7 +1337,10 @@ def submit_tool_outputs_to_stream( else: raise ValueError("Invalid combination of arguments provided.") - return _models.AgentRunStream(response, event_handler) + # Cast the response to Iterator[bytes] for type correctness + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + return _models.AgentRunStream(response_iterator, event_handler) @overload def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @@ -1572,13 +1522,23 @@ def upload_file_and_poll( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. """ - file = self.upload_file(body=body, file=file, file_path=file_path, purpose=purpose, filename=filename, **kwargs) - - while file.status in ["uploaded", "pending", "running"]: + if body is not None: + uploaded_file = self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." + ) + + while uploaded_file.status in {"uploaded", "pending", "running"}: time.sleep(sleep_interval) - file = self.get_file(file.id) - - return file + uploaded_file = self.get_file(uploaded_file.id) + + return uploaded_file @overload def create_vector_store_and_poll( @@ -1658,17 +1618,19 @@ def create_vector_store_and_poll( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ + @distributed_trace def create_vector_store_and_poll( self, - body: Union[JSON, IO[bytes]] = None, + body: Union[JSON, IO[bytes], None] = None, *, + content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, + sleep_interval: float = 1, **kwargs: Any ) -> _models.VectorStore: """Creates a vector store. 
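Both polling helpers in this hunk share one shape: issue the create or upload call, then re-fetch the resource until it leaves its transient state (upload_file_and_poll rewritten above, create_vector_store_and_poll continued below). A minimal usage sketch, assuming an authenticated ai_client and a local file named product_info_1.md (the same names the agent samples in this series use):

file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
print(f"Uploaded file, status: {file.status}")  # returns only after leaving "uploaded"/"pending"/"running"

vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore")
print(f"Vector store status: {vector_store.status}")  # returns only after leaving "in_progress"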
@@ -1698,19 +1660,28 @@ def create_vector_store_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - vector_store = self.create_vector_store( - body=body, - file_ids=file_ids, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs - ) + if body is not None: + vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs) + elif file_ids is not None or (name is not None and expires_after is not None): + vector_store = self.create_vector_store( + content_type=content_type, + file_ids=file_ids, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs + ) + else: + raise ValueError( + "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " + "'file_ids', or 'name' and 'expires_after'." + ) + while vector_store.status == "in_progress": time.sleep(sleep_interval) vector_store = self.get_vector_store(vector_store.id) - + return vector_store diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index fd80e5077a43..7f7a2480d8f2 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -28,6 +28,7 @@ import os + async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index 611da3150afe..673cdfe87a98 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -24,13 +24,14 @@ import time from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import AsyncFunctionTool +from azure.ai.client.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction from azure.identity import DefaultAzureCredential import os from user_async_functions import user_async_functions + async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
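# (The loop below implements the manual tool-call protocol by hand: when run.status
#  is "requires_action" and run.required_action is a SubmitToolOutputsAction, each
#  RequiredFunctionToolCall is executed locally and the results are returned with
#  submit_tool_outputs. The create_and_process_run helper patched above wraps this
#  same poll-execute-submit cycle, so a sketch of the equivalent call, assuming the
#  async operations mirror the sync signature:
#
#      run = await ai_client.agents.create_and_process_run(
#          thread_id=thread.id, assistant_id=agent.id
#      )
# )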
# At the moment, it should be in the format ";;;" @@ -62,7 +63,6 @@ async def main(): model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions ) print(f"Created agent, agent ID: {agent.id}") - print("Created assistant client") # Create thread for communication thread = await ai_client.agents.create_thread() @@ -81,7 +81,7 @@ async def main(): time.sleep(4) run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id) - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") @@ -90,12 +90,15 @@ async def main(): tool_outputs = [] for tool_call in tool_calls: - output = await functions.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append({ + "tool_call_id": tool_call.id, + "output": output, + }) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 8d11336e1dad..6363f45880cd 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -30,6 +30,7 @@ import os + class MyEventHandler(AsyncAgentEventHandler): async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index c6d8cd869b59..b53df06275e9 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -24,16 +24,16 @@ from typing import Any from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import _models from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun from azure.ai.client.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet -from azure.ai.client.operations._patch import AgentsOperations +from azure.ai.client.aio.operations import AgentsOperations from azure.identity import DefaultAzureCredential import os from user_async_functions import user_async_functions + class MyEventHandler(AsyncAgentEventHandler): def __init__(self, agents: AgentsOperations) -> None: @@ -60,9 +60,6 @@ async def on_thread_run(self, run: "ThreadRun") -> None: async def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") - async def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - async def on_error(self, data: str) -> 
None: print(f"An error occurred. Data: {data}") @@ -73,29 +70,31 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") async def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls to execute.") - return - if not self._agents: - print("AssistantClient not set. Cannot execute tool calls using toolset.") - return - - toolset = self._agents.get_toolset() - if toolset: - tool_outputs = await toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - async with await self._agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=self - ) as stream: - await stream.until_done() + if isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls to execute.") + return + if not self._agents: + print("AssistantClient not set. Cannot execute tool calls using toolset.") + return + + toolset = self._agents.get_toolset() + if toolset: + tool_outputs = await toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + async with await self._agents.submit_tool_outputs_to_stream( + thread_id=run.thread_id, + run_id=run.id, + tool_outputs=tool_outputs, + event_handler=self + ) as stream: + await stream.until_done() + async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index 6d1cf2fd89cb..9baf34739fd6 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -29,6 +29,7 @@ import os + async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 3342d0741305..0df2df3078b3 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -29,6 +29,7 @@ import os + async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
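Note: several hunks in this series replace attribute-truthiness checks such as `run.required_action.submit_tool_outputs` with `isinstance(run.required_action, SubmitToolOutputsAction)`. The reason is that `required_action` is polymorphic: only the isinstance guard both protects at runtime and narrows the type. A small illustrative sketch using the model names imported in these samples:

```python
from azure.ai.client.models import SubmitToolOutputsAction, ThreadRun


def pending_tool_calls(run: ThreadRun) -> list:
    # Only a SubmitToolOutputsAction carries .submit_tool_outputs, so the
    # isinstance check is required before touching .tool_calls.
    if isinstance(run.required_action, SubmitToolOutputsAction):
        return run.required_action.submit_tool_outputs.tool_calls
    return []
```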
# At the moment, it should be in the format ";;;" @@ -63,7 +64,7 @@ async def main(): # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file # also, you do not need to provide tool_resources if you did not create a vector store above - agent = ai_client.agents.create_agent( + agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", tools=[file_search_tool], tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])) @@ -89,7 +90,7 @@ async def main(): await ai_client.agents.delete_vector_store(vector_store.id) print("Deleted vectore store") - await ai_client.agents.delete_assistant(agent.id) + await ai_client.agents.delete_agent(agent.id) print("Deleted assistant") messages = await ai_client.agents.list_messages(thread_id=thread.id) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index 5f36c9de0e9e..c80b2958a799 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -61,7 +61,7 @@ # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to view the file agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tools=[code_interpreter] + tools=list(code_interpreter.definitions) ) print(f"Created assistant, assistant ID: {agent.id}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 3416906e0dbd..1f61d647b01f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -56,7 +56,7 @@ with ai_client: # Create file search tool file_search = FileSearchTool() - openai_file = ai_client.agents.upload_file(file_path="product_info_1.md", purpose="assistants") + openai_file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") openai_vectorstore = ai_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore") @@ -82,9 +82,6 @@ print(f"Created message, ID: {message.id}") # Create and process assistant run in thread with tools - # Note: If vector store has been created just before this, there can be need to poll the status of vector store to be ready for information retrieval - # This can be done by calling `assistant_client.get_vector_store(vector_store_id)` and checking the status of vector store - # We may want to add conveniency around this run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Run finished with status: {run.status}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index 585874f0bf90..62344d78e059 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -23,7 +23,7 @@ import os, time from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential -from 
azure.ai.client.models import FunctionTool +from azure.ai.client.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall from user_functions import user_functions @@ -45,7 +45,7 @@ model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", - tools=functions.definitions, + tools=list(functions.definitions), ) print(f"Created agent, ID: {agent.id}") @@ -66,7 +66,7 @@ time.sleep(1) run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id) - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print("No tool calls provided - cancelling run") @@ -75,12 +75,15 @@ tool_outputs = [] for tool_call in tool_calls: - output = functions.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append({ + "tool_call_id": tool_call.id, + "output": output, + }) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 1071cd24aa58..5628c36e7d9f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -25,7 +25,7 @@ from azure.ai.client import AzureAIClient from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun from azure.ai.client.models import AgentEventHandler -from azure.ai.client.operations._patch import AgentsOperations +from azure.ai.client.operations import AgentsOperations from azure.identity import DefaultAzureCredential from azure.ai.client.models import FunctionTool, ToolSet @@ -84,9 +84,6 @@ def on_thread_run(self, run: "ThreadRun") -> None: def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - def on_error(self, data: str) -> None: print(f"An error occurred. 
Data: {data}") @@ -97,34 +94,34 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls to execute.") - return - - toolset = self._agents.get_toolset() - if toolset: - tool_outputs = toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - with self._agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=self - ) as stream: - stream.until_done() - - -functions = FunctionTool(user_functions) -toolset = ToolSet() -toolset.add(functions) + if isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls to execute.") + return + + toolset = self._agents.get_toolset() + if toolset: + tool_outputs = toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + with self._agents.submit_tool_outputs_to_stream( + thread_id=run.thread_id, + run_id=run.id, + tool_outputs=tool_outputs, + event_handler=self + ) as stream: + stream.until_done() with ai_client: + functions = FunctionTool(user_functions) + toolset = ToolSet() + toolset.add(functions) + agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index a34fb5c1912e..7a400b2e6b60 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -24,9 +24,9 @@ import os from azure.ai.client import AzureAIClient from azure.ai.client.models import AgentStreamEvent -from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun from azure.ai.client.models import FunctionTool, ToolSet -from azure.ai.client.operations._operations import AgentsOperations +from azure.ai.client.operations import AgentsOperations from azure.identity import DefaultAzureCredential from user_functions import user_functions @@ -53,9 +53,9 @@ """ # Function to handle tool stream iteration -def handle_submit_tool_outputs(operatiions: AgentsOperations, thread_id, run_id, tool_outputs): +def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): try: - with operatiions.submit_tool_outputs_to_stream( + with operations.submit_tool_outputs_to_stream( thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, From d8d3a162cf6b81fa819dbebbf1704894220b64d0 Mon Sep 17 00:00:00 2001 From: howieleung Date: Tue, 15 Oct 2024 09:20:07 -0700 Subject: [PATCH 027/138] Use List instead of Sequence and fixed samples (#37898) --- .../azure/ai/client/models/_patch.py | 15 +++++++-------- 
.../sample_agents_functions_async.py | 2 +- .../sample_agents_code_interpreter_attachment.py | 7 +++---- .../samples/agents/sample_agents_functions.py | 2 +- .../sample_agents_with_file_search_attachment.py | 3 +-- 5 files changed, 13 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 4f7a5c966996..1c88e8a4a77b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -13,7 +13,6 @@ import base64 import asyncio -from typing import List, Sequence from azure.core.credentials import TokenCredential, AccessToken from ._enums import AgentStreamEvent @@ -169,7 +168,7 @@ class Tool(ABC): @property @abstractmethod - def definitions(self) -> Sequence[ToolDefinition]: + def definitions(self) -> List[ToolDefinition]: """Get the tool definitions.""" pass @@ -204,7 +203,7 @@ def __init__(self, functions: Dict[str, Any]): self._functions = functions self._definitions = self._build_function_definitions(functions) - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: specs = [] for name, func in functions.items(): sig = inspect.signature(func) @@ -259,7 +258,7 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any: raise @property - def definitions(self) -> Sequence[FunctionToolDefinition]: + def definitions(self) -> List[ToolDefinition]: """ Get the function definitions. @@ -308,7 +307,7 @@ def add_vector_store(self, store_id: str): self.vector_store_ids.append(store_id) @property - def definitions(self) -> Sequence[FileSearchToolDefinition]: + def definitions(self) -> List[ToolDefinition]: """ Get the file search tool definitions. """ @@ -342,7 +341,7 @@ def add_file(self, file_id: str): self.file_ids.append(file_id) @property - def definitions(self) -> Sequence[CodeInterpreterToolDefinition]: + def definitions(self) -> List[ToolDefinition]: """ Get the code interpreter tool definitions. """ @@ -365,7 +364,7 @@ class ToolSet: """ def __init__(self): - self._tools = [] + self._tools: List[Tool] = [] def validate_tool_type(self, tool_type: Type[Tool]) -> None: """ @@ -407,7 +406,7 @@ def remove(self, tool_type: Type[Tool]) -> None: raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") @property - def definitions(self) -> Sequence[ToolDefinition]: + def definitions(self) -> List[ToolDefinition]: """ Get the definitions for all tools in the tool set. 
""" diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index 673cdfe87a98..ff553828cc1d 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -92,7 +92,7 @@ async def main(): for tool_call in tool_calls: if isinstance(tool_call, RequiredFunctionToolCall): try: - output = functions.execute(tool_call) + output = await functions.execute(tool_call) tool_outputs.append({ "tool_call_id": tool_call.id, "output": output, diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index c80b2958a799..61f5ad76f3be 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -60,16 +60,15 @@ # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to view the file agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tools=list(code_interpreter.definitions) + model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) - print(f"Created assistant, assistant ID: {agent.id}") + print(f"Created agent, agent ID: {agent.id}") thread = ai_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") # create a message with the attachment - attachment = MessageAttachment(file_id=file.id, tools=[code_interpreter]) + attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]) print(f"Created message, message ID: {message.id}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index 62344d78e059..008b6af5ca9f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -45,7 +45,7 @@ model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", - tools=list(functions.definitions), + tools=functions.definitions, ) print(f"Created agent, ID: {agent.id}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index 293d32c539b7..cd599c19347f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -68,7 +68,6 @@ # also, you do not need to provide tool_resources if you did not create a vector store above agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tools=[file_search_tool], tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])) ) print(f"Created agent, agent ID: {agent.id}") @@ -77,7 +76,7 @@ print(f"Created thread, thread ID: {thread.id}") # create a message with the attachment - 
attachment = MessageAttachment(file_id=file.id, tools=[file_search_tool.definitions]) + attachment = MessageAttachment(file_id=file.id, tools=file_search_tool.definitions) message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]) print(f"Created message, message ID: {message.id}") From e672df76fd13c0911c8d724a4bbf522f3d11cdb0 Mon Sep 17 00:00:00 2001 From: howieleung Date: Tue, 15 Oct 2024 20:42:14 -0700 Subject: [PATCH 028/138] =?UTF-8?q?fixed=20warning=20for=20aio=20and=20get?= =?UTF-8?q?=20call=20function=20tools=20for=20stream=20within=20t=E2=80=A6?= =?UTF-8?q?=20(#37913)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fixed warning for aio and get call function tools for stream within the SDK * Fixed another warning * Fixed return type --- .../azure/ai/client/aio/operations/_patch.py | 206 +++++++++--------- .../azure/ai/client/models/_patch.py | 11 +- .../azure/ai/client/operations/_patch.py | 41 +++- .../sample_agents_basics_async.py | 2 +- .../sample_agents_functions_async.py | 4 +- ...sample_agents_stream_eventhandler_async.py | 2 +- ..._stream_eventhandler_with_toolset_async.py | 38 +--- .../sample_agents_stream_iteration_async.py | 2 +- ...gents_with_file_search_attachment_async.py | 2 +- ...mple_agents_code_interpreter_attachment.py | 2 +- .../agents/sample_agents_file_search.py | 4 +- ...agents_stream_eventhandler_with_toolset.py | 34 +-- ...le_agents_stream_iteration_with_toolset.py | 19 +- ...mple_agents_with_file_search_attachment.py | 2 +- 14 files changed, 160 insertions(+), 209 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 53c4428c3e78..2bcb657ab443 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -6,12 +6,12 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from argparse import FileType +from ..._vendor import FileType import io import logging import os import time -from typing import IO, Any, Dict, List, AsyncIterable, MutableMapping, Optional, Union, overload +from typing import IO, Any, AsyncIterator, Dict, List, AsyncIterable, MutableMapping, Optional, Union, cast, overload from azure.ai.client import _types from ._operations import EndpointsOperations as EndpointsOperationsGenerated @@ -322,48 +322,6 @@ async def create_agent(self, body: IO[bytes], *, content_type: str = "applicatio :raises ~azure.core.exceptions.HttpResponseError: """ - @overload - async def create_agent( - self, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with toolset. - - :keyword model: The ID of the model to use. Required if `body` is not provided. - :paramtype model: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: A description for the new agent. Default value is None. 
- :paramtype description: str - :keyword instructions: System instructions for the agent. Default value is None. - :paramtype instructions: str - :keyword toolset: Collection of tools (alternative to `tools` and `tool_resources`). Default - value is None. - :paramtype toolset: ~azure.ai.client.models.ToolSet - :keyword temperature: Sampling temperature for generating agent responses. Default value - is None. - :paramtype temperature: float - :keyword top_p: Nucleus sampling parameter. Default value is None. - :paramtype top_p: float - :keyword response_format: Response format for tool calls. Default value is None. - :paramtype response_format: ~azure.ai.client.models.AgentsApiResponseFormatOption - :keyword metadata: Key/value pairs for storing additional information. Default value is None. - :paramtype metadata: dict[str, str] - :return: An Agent object. - :rtype: ~azure.ai.client.models.Agent - :raises: ~azure.core.exceptions.HttpResponseError - """ - @distributed_trace_async async def create_agent( self, @@ -375,7 +333,7 @@ async def create_agent( instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, + toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -427,12 +385,12 @@ async def create_agent( **kwargs, ) - def get_toolset(self) -> Optional[_models.ToolSet]: + def get_toolset(self) -> Optional[_models.AsyncToolSet]: """ Get the toolset for the agent. :return: The toolset for the agent. If not set, returns None. - :rtype: ~azure.ai.client.models.ToolSet + :rtype: ~azure.ai.client.models.AsyncToolSet """ if hasattr(self, "_toolset"): return self._toolset @@ -476,7 +434,6 @@ async def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.ThreadRun: """Creates a new run for an agent thread. @@ -548,9 +505,6 @@ async def create_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -594,7 +548,6 @@ async def create_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.ThreadRun: """Creates a new run for an agent thread. @@ -665,9 +618,6 @@ async def create_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -727,7 +677,6 @@ async def create_and_process_run( tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - event_handler: Optional[_models.AgentEventHandler] = None, sleep_interval: int = 1, **kwargs: Any, ) -> _models.ThreadRun: @@ -799,13 +748,10 @@ async def create_and_process_run( 64 characters in length and values may be up to 512 characters in length. Default value is None. :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler :keyword sleep_interval: The time in seconds to wait between polling the service for run status. Default value is 1. :paramtype sleep_interval: int - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -826,7 +772,6 @@ async def create_and_process_run( tool_choice=tool_choice, response_format=response_format, metadata=metadata, - event_handler=event_handler, **kwargs, ) @@ -835,11 +780,11 @@ async def create_and_process_run( time.sleep(sleep_interval) run = await self.get_run(thread_id=thread_id, run_id=run.id) - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: logging.warning("No tool calls provided - cancelling run") - self.cancel_run(thread_id=thread_id, run_id=run.id) + await self.cancel_run(thread_id=thread_id, run_id=run.id) break toolset = self.get_toolset() @@ -850,14 +795,14 @@ async def create_and_process_run( logging.info("Tool outputs: %s", tool_outputs) if tool_outputs: - self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) + await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs) logging.info("Current run status: %s", run.status) return run @overload - async def create_stream( + def create_stream( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.AsyncAgentRunStream: """Creates a new stream for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -869,7 +814,7 @@ async def create_stream( :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -897,7 +842,7 @@ async def create_stream( event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: - """Creates a new run for an agent thread. 
+ """Creates a new stream for an agent thread. :param thread_id: Required. :type thread_id: str @@ -968,8 +913,8 @@ async def create_stream( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -987,7 +932,7 @@ async def create_stream( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1085,8 +1030,8 @@ async def create_stream( :paramtype metadata: dict[str, str] :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1123,8 +1068,10 @@ async def create_stream( else: raise ValueError("Invalid combination of arguments provided.") + + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - return _models.AsyncAgentRunStream(await response, event_handler) + return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) @overload async def submit_tool_outputs_to_run( @@ -1174,7 +1121,7 @@ async def submit_tool_outputs_to_run( :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.client.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -1255,7 +1202,7 @@ async def submit_tool_outputs_to_run( async def submit_tool_outputs_to_stream( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.AsyncAgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -1268,7 +1215,7 @@ async def submit_tool_outputs_to_stream( :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1284,7 +1231,7 @@ async def submit_tool_outputs_to_stream( event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -1299,8 +1246,8 @@ async def submit_tool_outputs_to_stream( :paramtype content_type: str :keyword event_handler: The event handler to use for processing events during the run. Default value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1309,7 +1256,7 @@ async def submit_tool_outputs_to_stream( async def submit_tool_outputs_to_stream( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.AsyncAgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -1322,7 +1269,7 @@ async def submit_tool_outputs_to_stream( :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1335,10 +1282,10 @@ async def submit_tool_outputs_to_stream( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: Optional[_models.AgentEventHandler] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. 
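Note: for orientation, the calling pattern these async overloads support — the same shape the `_handle_submit_tool_outputs` helper added later in this diff uses internally. A hedged sketch, not SDK source; the `agents` operations object, `run`, and handler are parameters standing in for the surrounding scope:

```python
from typing import List, Optional

from azure.ai.client.models import AsyncAgentEventHandler, ToolOutput


async def submit_and_drain(
    agents,
    run,
    tool_outputs: List[ToolOutput],
    handler: Optional[AsyncAgentEventHandler] = None,
) -> None:
    # Submit the tool outputs, then consume the resulting event stream
    # until the service sends the terminal "data: [DONE]" message.
    async with await agents.submit_tool_outputs_to_stream(
        thread_id=run.thread_id,
        run_id=run.id,
        tool_outputs=tool_outputs,
        event_handler=handler,
    ) as stream:
        await stream.until_done()
```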
@@ -1352,7 +1299,7 @@ async def submit_tool_outputs_to_stream( :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] :param event_handler: The event handler to use for processing events during the run. :param kwargs: Additional parameters. - :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with Iterable and supports streaming. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.client.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: """ @@ -1373,9 +1320,34 @@ async def submit_tool_outputs_to_stream( else: raise ValueError("Invalid combination of arguments provided.") - return _models.AsyncAgentRunStream(await response, event_handler) + # Cast the response to Iterator[bytes] for type correctness + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + async def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + toolset = self.get_toolset() + if toolset: + tool_outputs = await toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + logger.info(f"Tool outputs: {tool_outputs}") + if tool_outputs: + async with await self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, + run_id=run.id, + tool_outputs=tool_outputs, + event_handler=event_handler + ) as stream: + await stream.until_done() - @overload async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1461,7 +1433,7 @@ async def upload_file( raise FileNotFoundError(f"The file path provided does not exist: {file_path}") try: - with open(file_path, 'rb') as f: + with open(file_path, "rb") as f: content = f.read() # Determine filename and create correct FileType @@ -1473,7 +1445,7 @@ async def upload_file( raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - + @overload async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1558,13 +1530,23 @@ async def upload_file_and_poll( :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. """ - file = await self.upload_file(body=body, file=file, file_path=file_path, purpose=purpose, filename=filename, **kwargs) - - while file.status in ["uploaded", "pending", "running"]: + if body is not None: + uploaded_file = await self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ ) + + while uploaded_file.status in {"uploaded", "pending", "running"}: time.sleep(sleep_interval) - file = await self.get_file(file.id) - - return file + uploaded_file = await self.get_file(uploaded_file.id) + + return uploaded_file @overload async def create_vector_store_and_poll( @@ -1644,17 +1626,19 @@ async def create_vector_store_and_poll( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ + @distributed_trace_async async def create_vector_store_and_poll( self, - body: Union[JSON, IO[bytes]] = None, + body: Union[JSON, IO[bytes], None] = None, *, + content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, + sleep_interval: float = 1, **kwargs: Any ) -> _models.VectorStore: """Creates a vector store. @@ -1684,21 +1668,31 @@ async def create_vector_store_and_poll( :raises ~azure.core.exceptions.HttpResponseError: """ - vector_store = await self.create_vector_store( - body=body, - file_ids=file_ids, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs - ) + if body is not None: + vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs) + elif file_ids is not None or (name is not None and expires_after is not None): + vector_store = await self.create_vector_store( + content_type=content_type, + file_ids=file_ids, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs + ) + else: + raise ValueError( + "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " + "'file_ids', or 'name' and 'expires_after'." 
+ ) + while vector_store.status == "in_progress": time.sleep(sleep_interval) vector_store = await self.get_vector_store(vector_store.id) - + return vector_store + __all__: List[str] = [ "AgentsOperations", "EndpointsOperations", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 1c88e8a4a77b..2e0431cc862f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -19,6 +19,7 @@ from ._models import ( ConnectionsListSecretsResponse, MessageDeltaChunk, + SubmitToolOutputsAction, ThreadRun, RunStep, ThreadMessage, @@ -35,7 +36,7 @@ ) from abc import ABC, abstractmethod -from typing import AsyncIterator, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin +from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin logger = logging.getLogger(__name__) @@ -603,12 +604,14 @@ class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): def __init__( self, response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], event_handler: Optional['AsyncAgentEventHandler'] = None, ): self.response_iterator = response_iterator self.event_handler = event_handler self.done = False self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs async def __aenter__(self): return self @@ -707,6 +710,8 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: event_type, event_data_obj = self._parse_event_data(event_data_str) + if isinstance(event_data_obj, ThreadRun) and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction): + await self.submit_tool_outputs(event_data_obj, self.event_handler) if self.event_handler: try: if isinstance(event_data_obj, MessageDeltaChunk): @@ -746,12 +751,14 @@ class AgentRunStream(Iterator[Tuple[str, Any]]): def __init__( self, response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AgentEventHandler]], None], event_handler: Optional[AgentEventHandler] = None, ): self.response_iterator = response_iterator self.event_handler = event_handler self.done = False self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs def __enter__(self): return self @@ -846,6 +853,8 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: def _process_event(self, event_data_str: str) -> Tuple[str, Any]: event_type, event_data_obj = self._parse_event_data(event_data_str) + if isinstance(event_data_obj, ThreadRun) and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction): + self.submit_tool_outputs(event_data_obj, self.event_handler) if self.event_handler: try: if isinstance(event_data_obj, MessageDeltaChunk): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 6fd6436483dc..00d2d4740895 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -384,7 +384,7 @@ def create_agent( if toolset is not None: self._toolset = toolset - tools = list(toolset.definitions) + tools = toolset.definitions tool_resources = toolset.resources return super().create_agent( @@ -820,7 +820,7 @@ def 
create_and_process_run( @overload def create_stream( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> Union[_models.ThreadRun, _models.AgentRunStream]: + ) -> _models.AgentRunStream: """Creates a new stream for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. :param thread_id: Required. @@ -858,7 +858,7 @@ def create_stream( event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.AgentRunStream: - """Creates a new run for an agent thread. + """Creates a new stream for an agent thread. :param thread_id: Required. :type thread_id: str @@ -1087,7 +1087,7 @@ def create_stream( response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - return _models.AgentRunStream(response_iterator, event_handler) + return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) @overload @@ -1219,7 +1219,7 @@ def submit_tool_outputs_to_run( def submit_tool_outputs_to_stream( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.AgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -1248,7 +1248,7 @@ def submit_tool_outputs_to_stream( event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.AgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -1273,7 +1273,7 @@ def submit_tool_outputs_to_stream( def submit_tool_outputs_to_stream( self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.AgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. @@ -1302,7 +1302,7 @@ def submit_tool_outputs_to_stream( event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.AgentRunStream: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool outputs will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. 
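Note: after this patch a caller drives a synchronous stream exactly as the updated samples below do — the event handler no longer needs an `AgentsOperations` reference, because tool calls are resolved inside the SDK. A usage sketch assuming `ai_client`, `thread`, and `agent` already exist and `MyEventHandler` is any `AgentEventHandler` subclass:

```python
with ai_client.agents.create_stream(
    thread_id=thread.id,
    assistant_id=agent.id,
    event_handler=MyEventHandler(),  # tool outputs are now submitted by the SDK
) as stream:
    stream.until_done()
```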
@@ -1340,7 +1340,30 @@ def submit_tool_outputs_to_stream( # Cast the response to Iterator[bytes] for type correctness response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - return _models.AgentRunStream(response_iterator, event_handler) + return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + toolset = self.get_toolset() + if toolset: + tool_outputs = toolset.execute_tool_calls(tool_calls) + else: + raise ValueError("Toolset is not available in the client.") + + logger.info(f"Tool outputs: {tool_outputs}") + if tool_outputs: + with self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, + run_id=run.id, + tool_outputs=tool_outputs, + event_handler=event_handler + ) as stream: + stream.until_done() @overload def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index 7f7a2480d8f2..2e6a67c27dde 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -77,7 +77,7 @@ async def main(): print(f"Run completed with status: {run.status}") await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index ff553828cc1d..3359c92c6918 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -110,9 +110,9 @@ async def main(): print(f"Run completed with status: {run.status}") - # Delete the assistant when done + # Delete the agent when done await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") # Fetch and log all messages messages = await ai_client.agents.list_messages(thread_id=thread.id) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 6363f45880cd..95f9cde57391 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -99,7 +99,7 @@ async def main(): await stream.until_done() await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py 
b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index b53df06275e9..9be377687e31 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -24,7 +24,7 @@ from typing import Any from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun from azure.ai.client.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet from azure.ai.client.aio.operations import AgentsOperations from azure.identity import DefaultAzureCredential @@ -36,9 +36,6 @@ class MyEventHandler(AsyncAgentEventHandler): - def __init__(self, agents: AgentsOperations) -> None: - self._agents = agents - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: if isinstance(content_part, MessageDeltaTextContent): @@ -54,9 +51,6 @@ async def on_thread_run(self, run: "ThreadRun") -> None: if run.status == "failed": print(f"Run failed. Error: {run.last_error}") - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - await self._handle_submit_tool_outputs(run) - async def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") @@ -69,32 +63,6 @@ async def on_done(self) -> None: async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - async def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: - if isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls to execute.") - return - if not self._agents: - print("AssistantClient not set. Cannot execute tool calls using toolset.") - return - - toolset = self._agents.get_toolset() - if toolset: - tool_outputs = await toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - async with await self._agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=self - ) as stream: - await stream.until_done() - async def main(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
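Note: with submission handled inside the SDK, the async handler in this sample shrinks to pure reporting. Condensed from the surviving sample code (a sketch, not the full file):

```python
from typing import Any

from azure.ai.client.models import AsyncAgentEventHandler, ThreadRun


class MyEventHandler(AsyncAgentEventHandler):
    # No AgentsOperations reference needed: tool-output submission now runs
    # inside the SDK's stream handling.
    async def on_thread_run(self, run: "ThreadRun") -> None:
        print(f"ThreadRun status: {run.status}")

    async def on_error(self, data: str) -> None:
        print(f"An error occurred. Data: {data}")

    async def on_done(self) -> None:
        print("Stream completed.")

    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
```

The stream is then driven with `async with await ai_client.agents.create_stream(..., event_handler=MyEventHandler()) as stream: await stream.until_done()`, exactly as the hunk below shows.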
@@ -140,12 +108,12 @@ async def main(): async with await ai_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, - event_handler=MyEventHandler(ai_client.agents) + event_handler=MyEventHandler() ) as stream: await stream.until_done() await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index 9baf34739fd6..de5c64672b96 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -93,7 +93,7 @@ async def main(): print(f"Unhandled Event Type: {event_type}, Data: {event_data}") await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 0df2df3078b3..790a6117b871 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -91,7 +91,7 @@ async def main(): print("Deleted vectore store") await ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index 61f5ad76f3be..d5a8760fd00a 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -80,7 +80,7 @@ print("Deleted file") ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 1f61d647b01f..7fc4d15278ff 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -93,9 +93,9 @@ ai_client.agents.delete_vector_store(openai_vectorstore.id) print("Deleted vector store") - # Delete the assistant when done + # Delete the agent when done ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") # Fetch and log all messages messages = ai_client.agents.list_messages(thread_id=thread.id) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 5628c36e7d9f..d294a1962c64 100644 --- 
a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -23,7 +23,7 @@ import os from azure.ai.client import AzureAIClient -from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun from azure.ai.client.models import AgentEventHandler from azure.ai.client.operations import AgentsOperations from azure.identity import DefaultAzureCredential @@ -60,9 +60,6 @@ class MyEventHandler(AgentEventHandler): - def __init__(self, agents: AgentsOperations): - self._agents = agents - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: if isinstance(content_part, MessageDeltaTextContent): @@ -78,9 +75,6 @@ def on_thread_run(self, run: "ThreadRun") -> None: if run.status == "failed": print(f"Run failed. Error: {run.last_error}") - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - self._handle_submit_tool_outputs(run) - def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") @@ -93,28 +87,6 @@ def on_done(self) -> None: def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: - if isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls to execute.") - return - - toolset = self._agents.get_toolset() - if toolset: - tool_outputs = toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - with self._agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=self - ) as stream: - stream.until_done() with ai_client: @@ -136,12 +108,12 @@ def _handle_submit_tool_outputs(self, run: "ThreadRun") -> None: with ai_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, - event_handler=MyEventHandler(ai_client.agents) + event_handler=MyEventHandler() ) as stream: stream.until_done() ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index 7a400b2e6b60..0eeb6cbf401a 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -24,7 +24,7 @@ import os from azure.ai.client import AzureAIClient from azure.ai.client.models import AgentStreamEvent -from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, SubmitToolOutputsAction, ThreadMessage, ThreadRun +from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun from azure.ai.client.models import FunctionTool, 
ToolSet from azure.ai.client.operations import AgentsOperations from azure.identity import DefaultAzureCredential @@ -113,21 +113,6 @@ def handle_message_delta(delta: MessageDeltaChunk) -> None: if event_data.status == "failed": print(f"Run failed. Error: {event_data.last_error}") - - if event_data.status == "requires_action" and isinstance(event_data.required_action, SubmitToolOutputsAction): - tool_calls = event_data.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls to execute.") - break - - toolset = ai_client.agents.get_toolset() - if toolset: - tool_outputs = toolset.execute_tool_calls(tool_calls) - else: - raise ValueError("Toolset is not available in the client.") - - if tool_outputs: - handle_submit_tool_outputs(ai_client.agents, event_data.thread_id, event_data.id, tool_outputs) elif isinstance(event_data, RunStep): print(f"RunStep type: {event_data.type}, Status: {event_data.status}") @@ -143,7 +128,7 @@ def handle_message_delta(delta: MessageDeltaChunk) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index cd599c19347f..1edb7bd45410 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -90,7 +90,7 @@ print("Deleted vector store") ai_client.agents.delete_agent(agent.id) - print("Deleted assistant") + print("Deleted agent") messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") From e231bc58c48febc94236aa2c27251e22c04bfe41 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Wed, 16 Oct 2024 08:17:15 -0700 Subject: [PATCH 029/138] Evaluation API Review Feedback (#37915) * Evaluation API Review Feedback * add sample for evaluation schedule --------- Co-authored-by: Sai Kothinti --- .../azure/ai/client/__init__.py | 2 +- .../azure/ai/client/_client.py | 2 +- .../azure/ai/client/_serialization.py | 6 +- .../azure/ai/client/aio/__init__.py | 2 +- .../azure/ai/client/aio/_client.py | 2 +- .../ai/client/aio/operations/__init__.py | 2 +- .../ai/client/aio/operations/_operations.py | 422 +++--- .../azure/ai/client/aio/operations/_patch.py | 46 +- .../azure/ai/client/models/__init__.py | 36 +- .../azure/ai/client/models/_enums.py | 8 + .../azure/ai/client/models/_models.py | 1277 ++++++++++++----- .../azure/ai/client/models/_patch.py | 6 +- .../azure/ai/client/operations/__init__.py | 2 +- .../azure/ai/client/operations/_operations.py | 518 +++---- .../azure/ai/client/operations/_patch.py | 38 +- .../sample_agents_basics_async.py | 17 +- .../sample_agents_functions_async.py | 30 +- .../sample_evaluations_schedules.py | 65 + sdk/ai/azure-ai-client/tests/conftest.py | 2 +- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 20 files changed, 1458 insertions(+), 1027 deletions(-) create mode 100644 sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py index a8002a0739d5..809ca737e8e4 100644 ---
a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py @@ -21,6 +21,6 @@ __all__ = [ "AzureAIClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index 39545e19ad8d..c697bfab91e5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -60,7 +60,7 @@ def __init__( credential: "TokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{projectName}" + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long self._config = AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py index 7b3074215a30..480e941d758f 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py @@ -226,7 +226,7 @@ class _FixedOffset(datetime.tzinfo): # type: ignore :param datetime.timedelta offset: offset in timedelta format """ - def __init__(self, offset): + def __init__(self, offset) -> None: self.__offset = offset def utcoffset(self, dt): @@ -598,7 +598,7 @@ class Serializer(object): # pylint: disable=too-many-public-methods "multiple": lambda x, y: x % y != 0, } - def __init__(self, classes: Optional[Mapping[str, type]] = None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.serialize_type = { "iso-8601": Serializer.serialize_iso, "rfc-1123": Serializer.serialize_rfc, @@ -1452,7 +1452,7 @@ class Deserializer(object): valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - def __init__(self, classes: Optional[Mapping[str, type]] = None): + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: self.deserialize_type = { "iso-8601": Deserializer.deserialize_iso, "rfc-1123": Deserializer.deserialize_rfc, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py index 682d7f1b46a7..773c2c5dc6e9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py @@ -18,6 +18,6 @@ __all__ = [ "AzureAIClient", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 0e194be24fc6..99f26fac57ca 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -60,7 +60,7 @@ def __init__( credential: "AsyncTokenCredential", **kwargs: Any ) -> None: - _endpoint = "{endpoint}/{subscriptionId}/{resourceGroupName}/{projectName}" + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long self._config = 
AzureAIClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py index 8e6a46afb1f5..1a46c6d8abb9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py @@ -19,5 +19,5 @@ "EndpointsOperations", "EvaluationsOperations", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index 5a747e46f845..b9753729df22 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -75,14 +75,13 @@ build_agents_upload_file_request, build_endpoints_list_request, build_endpoints_list_secrets_request, + build_evaluations_create_or_replace_schedule_request, build_evaluations_create_request, - build_evaluations_create_schedule_request, build_evaluations_delete_schedule_request, build_evaluations_get_request, build_evaluations_get_schedule_request, build_evaluations_list_request, - build_evaluations_list_schedule_evaluations_request, - build_evaluations_list_schedules_request, + build_evaluations_list_schedule_request, build_evaluations_update_request, ) @@ -5189,13 +5188,83 @@ def __init__(self, *args, **kwargs) -> None: self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace_async + async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + @overload async def create( self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. :type evaluation: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -5209,9 +5278,9 @@ async def create( async def create( self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. :type evaluation: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -5225,9 +5294,9 @@ async def create( async def create( self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. :type evaluation: IO[bytes] :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. Default value is "application/json". @@ -5239,9 +5308,9 @@ async def create( @distributed_trace_async async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, IO[bytes] Required. :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping @@ -5316,7 +5385,7 @@ async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], * def list( self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.Evaluation"]: - """List evaluations. + """Resource list operation template. :keyword top: The number of result items to return. Default value is None. :paramtype top: int @@ -5418,19 +5487,19 @@ async def get_next(next_link=None): async def update( self, id: str, - update_request: _models.UpdateEvaluationRequest, + resource: _models.Evaluation, *, - content_type: str = "application/json", + content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. - :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest + :param resource: The resource instance. Required. + :type resource: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -5439,16 +5508,16 @@ async def update( @overload async def update( - self, id: str, update_request: JSON, *, content_type: str = "application/json", **kwargs: Any + self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. - :type update_request: JSON + :param resource: The resource instance. Required. + :type resource: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -5457,16 +5526,16 @@ async def update( @overload async def update( - self, id: str, update_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. - :type update_request: IO[bytes] + :param resource: The resource instance. 
Required. + :type resource: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -5475,15 +5544,15 @@ async def update( @distributed_trace_async async def update( - self, id: str, update_request: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Is one of the following types: - UpdateEvaluationRequest, JSON, IO[bytes] Required. - :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :param resource: The resource instance. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type resource: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -5502,12 +5571,12 @@ async def update( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - content_type = content_type or "application/json" + content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(update_request, (IOBase, bytes)): - _content = update_request + if isinstance(resource, (IOBase, bytes)): + _content = resource else: - _content = json.dumps(update_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_evaluations_update_request( id=id, @@ -5543,24 +5612,29 @@ async def update( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if _stream: deserialized = response.iter_bytes() else: deserialized = _deserialize(_models.Evaluation, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace_async - async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Get an evaluation. + async def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. :param id: Identifier of the evaluation. Required. :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -5574,9 +5648,9 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - _request = build_evaluations_get_request( + _request = build_evaluations_get_schedule_request( id=id, api_version=self._config.api_version, headers=_headers, @@ -5608,24 +5682,31 @@ async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Evaluation, response.json()) + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @overload - async def create_schedule( - self, body: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + async def create_or_replace_schedule( + self, id: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. - :type body: ~azure.ai.client.models.EvaluationSchedule + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.client.models.EvaluationSchedule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -5635,13 +5716,15 @@ async def create_schedule( """ @overload - async def create_schedule( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + async def create_or_replace_schedule( + self, id: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. - :type body: JSON + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -5651,13 +5734,15 @@ async def create_schedule( """ @overload - async def create_schedule( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + async def create_or_replace_schedule( + self, id: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. 
- :type body: IO[bytes] + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -5667,14 +5752,16 @@ async def create_schedule( """ @distributed_trace_async - async def create_schedule( - self, body: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + async def create_or_replace_schedule( + self, id: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Is one of the following types: - EvaluationSchedule, JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping :rtype: ~azure.ai.client.models.EvaluationSchedule :raises ~azure.core.exceptions.HttpResponseError: @@ -5695,12 +5782,13 @@ async def create_schedule( content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(resource, (IOBase, bytes)): + _content = resource else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_create_schedule_request( + _request = build_evaluations_create_or_replace_schedule_request( + id=id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -5724,7 +5812,7 @@ async def create_schedule( response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200, 201]: if _stream: try: await response.read() # Load the body in memory and close the socket @@ -5733,86 +5821,26 @@ async def create_schedule( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Get an evaluation schedule along with runs. - - :param id: Identifier of the evaluation schedule. Required. - :type id: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - if _stream: deserialized = response.iter_bytes() else: deserialized = _deserialize(_models.EvaluationSchedule, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def list_schedules( + def list_schedule( self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> AsyncIterable["_models.EvaluationSchedule"]: - """List evaluation schedules. + """Resource list operation template. :keyword top: The number of result items to return. Default value is None. :paramtype top: int @@ -5839,7 +5867,7 @@ def list_schedules( def prepare_request(next_link=None): if not next_link: - _request = build_evaluations_list_schedules_request( + _request = build_evaluations_list_schedule_request( top=top, skip=skip, maxpagesize=maxpagesize, @@ -5910,116 +5938,11 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) - @distributed_trace - def list_schedule_evaluations( - self, id: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> AsyncIterable["_models.Evaluation"]: - """List evaluations under a schedule. - - :param id: Identifier of the evaluation schedule. Required. - :type id: str - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. 
- :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_evaluations_request( - id=id, - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - @distributed_trace_async async def delete_schedule(self, id: str, **kwargs: Any) -> None: - """Delete an evaluation schedule. + """Resource delete operation template. 
- :param id: Identifier of the evaluation schedule. Required. + :param id: Identifier of the evaluation. Required. :type id: str :return: None :rtype: None @@ -6065,5 +5988,10 @@ async def delete_schedule(self, id: str, **kwargs: Any) -> None: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 2bcb657ab443..e1731fdf71d3 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -125,16 +126,14 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": # Pick latest GA version from the "Data plane - Inference" row in the table # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - AZURE_OPENAI_API_VERSION = "2024-06-01" + AZURE_OPENAI_API_VERSION = "2024-06-01" if endpoint.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" ) client = AsyncAzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION + api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, api_version=AZURE_OPENAI_API_VERSION ) elif endpoint.authentication_type == AuthenticationType.AAD: logger.debug( @@ -161,7 +160,7 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION + api_version=AZURE_OPENAI_API_VERSION, ) else: raise ValueError("Unknown authentication type") @@ -229,8 +228,9 @@ async def list( else: yield await self.get(endpoint_name=connection.name, populate_secrets=True) + class AgentsOperations(AgentsOperationsGenerated): - + @overload async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -309,7 +309,9 @@ async def create_agent( """ @overload - async def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: """Creates a new agent. :param body: Required. @@ -774,7 +776,7 @@ async def create_and_process_run( metadata=metadata, **kwargs, ) - + # Monitor and process the run status while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(sleep_interval) @@ -1378,9 +1380,7 @@ async def upload_file( """ @overload - async def upload_file( - self, file_path: str, *, purpose: str, **kwargs: Any - ) -> _models.OpenAIFile: + async def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param file_path: Required. 
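[Editor's note] For orientation, the upload wrapper being reshaped in these hunks accepts either a JSON body, a readable `file` object, or a local `file_path` together with a `purpose`, and its `*_and_poll` variant (plus the `create_vector_store_and_poll` helper in the next hunk) polls every `sleep_interval` seconds until the service finishes processing. A rough usage sketch under stated assumptions; the client configuration values, file name, and the `"assistants"` purpose string are illustrative.

```python
import asyncio
import os

from azure.ai.client.aio import AzureAIClient
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    ai_client = AzureAIClient(
        endpoint=os.environ["AI_CLIENT_ENDPOINT"],  # assumed environment variables
        subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
        resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
        project_name=os.environ["AI_PROJECT_NAME"],
        credential=DefaultAzureCredential(),
    )
    async with ai_client:
        # Path-based overload: the wrapper opens the file and fills in the name,
        # then polls until the file leaves the "pending" state.
        uploaded = await ai_client.agents.upload_file_and_poll(
            file_path="product_info.md", purpose="assistants", sleep_interval=1
        )
        print(f"Uploaded file, id: {uploaded.id}")

        # Companion helper from the following hunk: build a vector store over
        # the uploaded file and poll until it is ready.
        vector_store = await ai_client.agents.create_vector_store_and_poll(
            file_ids=[uploaded.id], sleep_interval=1
        )
        print(f"Vector store, id: {vector_store.id}")

        await ai_client.agents.delete_vector_store(vector_store.id)
        print("Deleted vector store")


asyncio.run(main())
```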
@@ -1391,7 +1391,7 @@ async def upload_file( :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: - """ + """ @distributed_trace_async async def upload_file( @@ -1402,7 +1402,7 @@ async def upload_file( file_path: Optional[str] = None, purpose: Optional[Union[str, _models.FilePurpose]] = None, filename: Optional[str] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.OpenAIFile: """ Uploads a file for use by other operations, delegating to the generated operations. @@ -1421,7 +1421,7 @@ async def upload_file( """ if body is not None: return await super().upload_file(body=body, **kwargs) - + if isinstance(purpose, FilePurpose): purpose = purpose.value @@ -1462,7 +1462,13 @@ async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kw @overload async def upload_file_and_poll( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1498,7 +1504,7 @@ async def upload_file_and_poll( :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: - """ + """ @distributed_trace_async async def upload_file_and_poll( @@ -1510,7 +1516,7 @@ async def upload_file_and_poll( purpose: Optional[Union[str, _models.FilePurpose]] = None, filename: Optional[str] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.OpenAIFile: """ Uploads a file for use by other operations, delegating to the generated operations. @@ -1547,7 +1553,7 @@ async def upload_file_and_poll( uploaded_file = await self.get_file(uploaded_file.id) return uploaded_file - + @overload async def create_vector_store_and_poll( self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any @@ -1577,8 +1583,8 @@ async def create_vector_store_and_poll( expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStore: """Creates a vector store and poll. 
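[Editor's note] Pulling together the evaluations rework above and the trigger models added in the following model changes: `create_schedule` becomes `create_or_replace_schedule(id, resource)`, and the removed `Recurrence`/`cron_expression` fields are replaced by `Trigger` subclasses (`CronTrigger`, `RecurrenceTrigger`). A hedged sketch of the new call shape; the `evaluations` attribute name, dataset and evaluator IDs, and the `EvaluatorConfiguration`/`SamplingStrategy` field names are assumptions for illustration.

```python
import asyncio
import os

from azure.ai.client.aio import AzureAIClient
from azure.ai.client.models import (
    CronTrigger,
    Dataset,
    EvaluationSchedule,
    EvaluatorConfiguration,
    SamplingStrategy,
)
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    ai_client = AzureAIClient(
        endpoint=os.environ["AI_CLIENT_ENDPOINT"],  # assumed environment variables
        subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
        resource_group_name=os.environ["AZURE_RESOURCE_GROUP"],
        project_name=os.environ["AI_PROJECT_NAME"],
        credential=DefaultAzureCredential(),
    )
    schedule = EvaluationSchedule(
        data=Dataset(id="azureml-dataset-id-placeholder"),  # assumed dataset ID
        trigger=CronTrigger(expression="0 0 * * *"),  # run daily at midnight
        sampling_strategy=SamplingStrategy(rate=0.2),  # field name assumed
        evaluators={
            # Evaluator ID and configuration fields assumed for illustration.
            "relevance": EvaluatorConfiguration(id="evaluator-id-placeholder"),
        },
        display_name="nightly-relevance-eval",
    )
    async with ai_client:
        # Schedules are now addressed by name: create or replace in one call.
        created = await ai_client.evaluations.create_or_replace_schedule(
            id="nightly-relevance-eval", resource=schedule
        )
        # provisioning_status replaces the old read-only status field.
        print(f"Schedule provisioning status: {created.provisioning_status}")


asyncio.run(main())
```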
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index ecf84301a862..aac7dc898b4b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -13,8 +13,14 @@ from ._models import AgentsApiResponseFormat from ._models import AgentsNamedToolChoice from ._models import AppInsightsConfiguration +from ._models import AzureAISearchResource +from ._models import AzureAISearchToolDefinition +from ._models import BingSearchToolDefinition from ._models import CodeInterpreterToolDefinition from ._models import CodeInterpreterToolResource +from ._models import ConnectionListResource +from ._models import ConnectionResource +from ._models import CronTrigger from ._models import Dataset from ._models import Evaluation from ._models import EvaluationSchedule @@ -28,6 +34,7 @@ from ._models import FunctionDefinition from ._models import FunctionName from ._models import FunctionToolDefinition +from ._models import IndexResource from ._models import InputData from ._models import MessageAttachment from ._models import MessageContent @@ -53,6 +60,7 @@ from ._models import MessageTextFileCitationDetails from ._models import MessageTextFilePathAnnotation from ._models import MessageTextFilePathDetails +from ._models import MicrosoftFabricToolDefinition from ._models import OpenAIFile from ._models import OpenAIPageableListOfAgent from ._models import OpenAIPageableListOfRunStep @@ -60,8 +68,8 @@ from ._models import OpenAIPageableListOfThreadRun from ._models import OpenAIPageableListOfVectorStore from ._models import OpenAIPageableListOfVectorStoreFile -from ._models import Recurrence from ._models import RecurrenceSchedule +from ._models import RecurrenceTrigger from ._models import RequiredAction from ._models import RequiredFunctionToolCall from ._models import RequiredFunctionToolCallDetails @@ -69,6 +77,8 @@ from ._models import RunCompletionUsage from ._models import RunError from ._models import RunStep +from ._models import RunStepAzureAISearchToolCall +from ._models import RunStepBingSearchToolCall from ._models import RunStepCodeInterpreterImageOutput from ._models import RunStepCodeInterpreterImageReference from ._models import RunStepCodeInterpreterLogOutput @@ -99,9 +109,12 @@ from ._models import RunStepFunctionToolCallDetails from ._models import RunStepMessageCreationDetails from ._models import RunStepMessageCreationReference +from ._models import RunStepMicrosoftFabricToolCall +from ._models import RunStepSharepointToolCall from ._models import RunStepToolCall from ._models import RunStepToolCallDetails from ._models import SamplingStrategy +from ._models import SharepointToolDefinition from ._models import SubmitToolOutputsAction from ._models import SubmitToolOutputsDetails from ._models import SystemData @@ -112,9 +125,9 @@ from ._models import ToolDefinition from ._models import ToolOutput from ._models import ToolResources +from ._models import Trigger from ._models import TruncationObject from ._models import UpdateCodeInterpreterToolResourceOptions -from ._models import UpdateEvaluationRequest from ._models import UpdateFileSearchToolResourceOptions from ._models import UpdateToolResourcesOptions from ._models import VectorStore @@ -180,8 +193,14 @@ "AgentsApiResponseFormat", "AgentsNamedToolChoice", "AppInsightsConfiguration", + "AzureAISearchResource", + "AzureAISearchToolDefinition", + "BingSearchToolDefinition", 
"CodeInterpreterToolDefinition", "CodeInterpreterToolResource", + "ConnectionListResource", + "ConnectionResource", + "CronTrigger", "Dataset", "Evaluation", "EvaluationSchedule", @@ -195,6 +214,7 @@ "FunctionDefinition", "FunctionName", "FunctionToolDefinition", + "IndexResource", "InputData", "MessageAttachment", "MessageContent", @@ -220,6 +240,7 @@ "MessageTextFileCitationDetails", "MessageTextFilePathAnnotation", "MessageTextFilePathDetails", + "MicrosoftFabricToolDefinition", "OpenAIFile", "OpenAIPageableListOfAgent", "OpenAIPageableListOfRunStep", @@ -227,8 +248,8 @@ "OpenAIPageableListOfThreadRun", "OpenAIPageableListOfVectorStore", "OpenAIPageableListOfVectorStoreFile", - "Recurrence", "RecurrenceSchedule", + "RecurrenceTrigger", "RequiredAction", "RequiredFunctionToolCall", "RequiredFunctionToolCallDetails", @@ -236,6 +257,8 @@ "RunCompletionUsage", "RunError", "RunStep", + "RunStepAzureAISearchToolCall", + "RunStepBingSearchToolCall", "RunStepCodeInterpreterImageOutput", "RunStepCodeInterpreterImageReference", "RunStepCodeInterpreterLogOutput", @@ -266,9 +289,12 @@ "RunStepFunctionToolCallDetails", "RunStepMessageCreationDetails", "RunStepMessageCreationReference", + "RunStepMicrosoftFabricToolCall", + "RunStepSharepointToolCall", "RunStepToolCall", "RunStepToolCallDetails", "SamplingStrategy", + "SharepointToolDefinition", "SubmitToolOutputsAction", "SubmitToolOutputsDetails", "SystemData", @@ -279,9 +305,9 @@ "ToolDefinition", "ToolOutput", "ToolResources", + "Trigger", "TruncationObject", "UpdateCodeInterpreterToolResourceOptions", - "UpdateEvaluationRequest", "UpdateFileSearchToolResourceOptions", "UpdateToolResourcesOptions", "VectorStore", @@ -335,5 +361,5 @@ "VectorStoreStatus", "WeekDays", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index 676321444449..8dc539649db7 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -37,6 +37,14 @@ class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tool type ``code_interpreter``""" FILE_SEARCH = "file_search" """Tool type ``file_search``""" + BING_GROUNDING = "bing_grounding" + """Tool type ``bing_grounding``""" + MICROSOFT_FABRIC = "microsoft_fabric" + """Tool type ``microsoft_fabric``""" + SHAREPOINT = "sharepoint" + """Tool type ``sharepoint``""" + AZURE_AI_SEARCH = "azure_ai_search" + """Tool type ``azure_ai_search``""" class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 1ba759bda534..d7d0c49738f3 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -128,10 +128,10 @@ def __init__( top_p: float, metadata: Dict[str, str], response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -171,10 +171,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin deleted: bool, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -203,10 +203,10 @@ def __init__( self, *, type: Optional[Union[str, "_models.ApiResponseFormat"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -221,7 +221,8 @@ class AgentsNamedToolChoice(_model_base.Model): :ivar type: the type of tool. If type is ``function``\\ , the function name must be set. - Required. Known values are: "function", "code_interpreter", and "file_search". + Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", + "microsoft_fabric", "sharepoint", and "azure_ai_search". :vartype type: str or ~azure.ai.client.models.AgentsNamedToolChoiceType :ivar function: The name of the function to call. :vartype function: ~azure.ai.client.models.FunctionName @@ -229,7 +230,8 @@ class AgentsNamedToolChoice(_model_base.Model): type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() """the type of tool. If type is ``function``\ , the function name must be set. Required. Known - values are: \"function\", \"code_interpreter\", and \"file_search\".""" + values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", + \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() """The name of the function to call.""" @@ -239,10 +241,10 @@ def __init__( *, type: Union[str, "_models.AgentsNamedToolChoiceType"], function: Optional["_models.FunctionName"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -302,10 +304,10 @@ def __init__( created_at: datetime.datetime, tool_resources: "_models.ToolResources", metadata: Dict[str, str], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -353,10 +355,10 @@ def __init__( messages: Optional[List["_models.ThreadMessageOptions"]] = None, tool_resources: Optional["_models.ToolResources"] = None, metadata: Optional[Dict[str, str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -386,10 +388,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -431,10 +433,10 @@ def __init__( resource_id: str, query: str, service_name: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] @@ -444,11 +446,43 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="app_insights", **kwargs) +class AzureAISearchResource(_model_base.Model): + """A set of index resources used by the ``azure_ai_search`` tool. + + :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent. + :vartype index_list: list[~azure.ai.client.models.IndexResource] + """ + + index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") + """The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + index_list: Optional[List["_models.IndexResource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ToolDefinition(_model_base.Model): """An abstract representation of an input tool definition that an agent can use. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - CodeInterpreterToolDefinition, FileSearchToolDefinition, FunctionToolDefinition + AzureAISearchToolDefinition, BingSearchToolDefinition, CodeInterpreterToolDefinition, + FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition, + SharepointToolDefinition :ivar type: The object type. Required. Default value is None. @@ -464,10 +498,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -477,6 +511,63 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): + """The input definition information for an Azure AI search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class BingSearchToolDefinition(ToolDefinition, discriminator="bing_search"): + """The input definition information for a bing search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'bing_search'. Required. Default value is + "bing_search". + :vartype type: str + """ + + type: Literal["bing_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_search'. Required. 
Default value is \"bing_search\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="bing_search", **kwargs) + + class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): """The input definition information for a code interpreter tool as used to configure an agent. @@ -493,10 +584,10 @@ class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpre @overload def __init__( self, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -525,10 +616,42 @@ def __init__( self, *, file_ids: Optional[List[str]] = None, - ): ... + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class ConnectionListResource(_model_base.Model): + """A set of connection resources currently used by either the ``bing_search``\\ , + ``microsoft_fabric``\\ , or ``sharepoint`` tools. + + :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 + connection + resource attached to the agent. + :vartype connection_list: list[~azure.ai.client.models.ConnectionResource] + """ + + connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") + """The connections attached to this agent. There can be a maximum of 1 connection + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ConnectionResource"]] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -631,6 +754,35 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): """The connection URL to be used for this service. Required.""" +class ConnectionResource(_model_base.Model): + """A connection resource. + + + :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field() + """A connection in a ConnectionListResource attached to this agent. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class ConnectionsListResponse(_model_base.Model): """Response from the list operation. @@ -683,6 +835,74 @@ class CredentialsSASAuth(_model_base.Model): """The Shared Access Signatures (SAS) token. Required.""" +class Trigger(_model_base.Model): + """Abstract data class for input data configuration. 
+ + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CronTrigger, RecurrenceTrigger + + + :ivar type: Type of the trigger. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """Type of the trigger. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class CronTrigger(Trigger, discriminator="Cron"): + """Cron Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Cron". + :vartype type: str + :ivar expression: Cron expression for the trigger. Required. + :vartype expression: str + """ + + type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"Cron\".""" + expression: str = rest_field() + """Cron expression for the trigger. Required.""" + + @overload + def __init__( + self, + *, + expression: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="Cron", **kwargs) + + class Dataset(InputData, discriminator="dataset"): """Dataset as source for evaluation. @@ -705,10 +925,10 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -724,7 +944,7 @@ class Evaluation(_model_base.Model): Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Identifier of the evaluation. + :ivar id: Identifier of the evaluation. Required. :vartype id: str :ivar data: Data for evaluation. Required. :vartype data: ~azure.ai.client.models.InputData @@ -747,9 +967,9 @@ class Evaluation(_model_base.Model): :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration] """ - id: Optional[str] = rest_field() - """Identifier of the evaluation.""" - data: "_models.InputData" = rest_field() + id: str = rest_field(visibility=["read"]) + """Identifier of the evaluation. Required.""" + data: "_models.InputData" = rest_field(visibility=["read", "create"]) """Data for evaluation. Required.""" display_name: Optional[str] = rest_field(name="displayName") """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need @@ -763,10 +983,10 @@ class Evaluation(_model_base.Model): """Status of the evaluation. It is set by service and is read-only.""" tags: Optional[Dict[str, str]] = rest_field() """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field() + properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) """Evaluation's properties. 
Unlike tags, properties are add-only. Once added, a property cannot be removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field() + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) """Evaluators to be used for the evaluation. Required.""" @overload @@ -775,15 +995,14 @@ def __init__( *, data: "_models.InputData", evaluators: Dict[str, "_models.EvaluatorConfiguration"], - id: Optional[str] = None, # pylint: disable=redefined-builtin display_name: Optional[str] = None, description: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional[Dict[str, str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -799,7 +1018,7 @@ class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instanc Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Identifier of the evaluation. + :ivar id: Identifier of the evaluation. Required. :vartype id: str :ivar data: Data for evaluation. Required. :vartype data: ~azure.ai.client.models.InputData @@ -811,8 +1030,8 @@ class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instanc :vartype description: str :ivar system_data: Metadata containing createdBy and modifiedBy information. :vartype system_data: ~azure.ai.client.models.SystemData - :ivar status: Status of the evaluation. It is set by service and is read-only. - :vartype status: str + :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. + :vartype provisioning_status: str :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. :vartype tags: dict[str, str] :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a @@ -820,17 +1039,15 @@ class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instanc :vartype properties: dict[str, str] :ivar evaluators: Evaluators to be used for the evaluation. Required. :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration] - :ivar recurrence: Recurrence pattern for the evaluation. - :vartype recurrence: ~azure.ai.client.models.Recurrence - :ivar cron_expression: Cron expression for the evaluation. - :vartype cron_expression: str + :ivar trigger: Trigger for the evaluation. Required. + :vartype trigger: ~azure.ai.client.models.Trigger :ivar sampling_strategy: Sampling strategy for the evaluation. Required. :vartype sampling_strategy: ~azure.ai.client.models.SamplingStrategy """ - id: Optional[str] = rest_field() - """Identifier of the evaluation.""" - data: "_models.InputData" = rest_field() + id: str = rest_field(visibility=["read"]) + """Identifier of the evaluation. Required.""" + data: "_models.InputData" = rest_field(visibility=["read", "create"]) """Data for evaluation. Required.""" display_name: Optional[str] = rest_field(name="displayName") """Display Name for evaluation. It helps to find evaluation easily in AI Studio. 
It does not need @@ -840,19 +1057,17 @@ class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instanc evaluation and is mutable.""" system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) """Metadata containing createdBy and modifiedBy information.""" - status: Optional[str] = rest_field(visibility=["read"]) + provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"]) """Status of the evaluation. It is set by service and is read-only.""" tags: Optional[Dict[str, str]] = rest_field() """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field() + properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field() + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) """Evaluators to be used for the evaluation. Required.""" - recurrence: Optional["_models.Recurrence"] = rest_field() - """Recurrence pattern for the evaluation.""" - cron_expression: Optional[str] = rest_field(name="cronExpression") - """Cron expression for the evaluation.""" + trigger: "_models.Trigger" = rest_field() + """Trigger for the evaluation. Required.""" sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") """Sampling strategy for the evaluation. Required.""" @@ -862,18 +1077,16 @@ def __init__( *, data: "_models.InputData", evaluators: Dict[str, "_models.EvaluatorConfiguration"], + trigger: "_models.Trigger", sampling_strategy: "_models.SamplingStrategy", - id: Optional[str] = None, # pylint: disable=redefined-builtin display_name: Optional[str] = None, description: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional[Dict[str, str]] = None, - recurrence: Optional["_models.Recurrence"] = None, - cron_expression: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -909,10 +1122,10 @@ def __init__( id: str, # pylint: disable=redefined-builtin init_params: Optional[Dict[str, Any]] = None, data_mapping: Optional[Dict[str, str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -938,10 +1151,10 @@ def __init__( self, *, content: bytes, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -978,10 +1191,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin deleted: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1014,10 +1227,10 @@ def __init__( self, *, data: List["_models.OpenAIFile"], - ): ... + ) -> None: ... 
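# --- Illustrative sketch; editorial addition, not part of the generated diff.
# EvaluationSchedule (above) replaces the earlier `recurrence`/`cron_expression`
# pair with a single polymorphic `trigger`, so a schedule is built from one of
# the Trigger subclasses. A minimal construction, assuming these names are
# exported from azure.ai.client.models as added in this patch; the dataset id,
# evaluator id, and sampling rate are hypothetical placeholders.

from azure.ai.client.models import (
    CronTrigger,
    Dataset,
    EvaluationSchedule,
    EvaluatorConfiguration,
    SamplingStrategy,
)

schedule = EvaluationSchedule(
    data=Dataset(id="azureml://datasets/eval-data/versions/1"),         # hypothetical id
    evaluators={
        "relevance": EvaluatorConfiguration(id="relevance-evaluator"),  # hypothetical id
    },
    trigger=CronTrigger(expression="0 0 * * *"),   # fire daily at midnight
    sampling_strategy=SamplingStrategy(rate=0.1),  # sample 10% of the data
    display_name="nightly-relevance-eval",
)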
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1049,10 +1262,10 @@ def __init__( self, *, file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1086,10 +1299,10 @@ def __init__( self, *, max_num_results: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1117,10 +1330,10 @@ def __init__( self, *, vector_store_ids: Optional[List[str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1159,10 +1372,10 @@ def __init__( name: str, parameters: Any, description: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1188,10 +1401,10 @@ def __init__( self, *, name: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1222,10 +1435,10 @@ def __init__( self, *, function: "_models.FunctionDefinition", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1235,6 +1448,41 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="function", **kwargs) +class IndexResource(_model_base.Model): + """A Index resource. + + + :ivar index_connection_id: An index connection id in an IndexResource attached to this agent. + Required. + :vartype index_connection_id: str + :ivar index_name: The name of an index in an IndexResource attached to this agent. Required. + :vartype index_name: str + """ + + index_connection_id: str = rest_field() + """An index connection id in an IndexResource attached to this agent. Required.""" + index_name: str = rest_field() + """The name of an index in an IndexResource attached to this agent. Required.""" + + @overload + def __init__( + self, + *, + index_connection_id: str, + index_name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + class MessageAttachment(_model_base.Model): """This describes to which tools a file has been attached. @@ -1257,10 +1505,10 @@ def __init__( *, file_id: str, tools: List["_types.MessageAttachmentToolDefinition"], - ): ... + ) -> None: ... 
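# --- Illustrative sketch; editorial addition, not part of the generated diff.
# IndexResource (above) pairs an index connection id with an index name, and
# AzureAISearchResource wraps it for the `azure_ai_search` tool; the docstrings
# note that at most one index resource may be attached to an agent. A minimal
# construction with hypothetical ids:

from azure.ai.client.models import AzureAISearchResource, IndexResource

search_resource = AzureAISearchResource(
    index_list=[
        IndexResource(
            index_connection_id="my-search-connection",  # hypothetical connection id
            index_name="product-docs-index",             # hypothetical index name
        )
    ]
)
# On the wire, `index_list` serializes under the REST field name "indexes".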
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1290,10 +1538,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1325,10 +1573,10 @@ def __init__( *, role: Union[str, "_models.MessageRole"], content: List["_models.MessageDeltaContent"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1367,10 +1615,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin delta: "_models.MessageDelta", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1406,10 +1654,10 @@ def __init__( *, index: int, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1444,10 +1692,10 @@ def __init__( *, index: int, image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1472,10 +1720,10 @@ def __init__( self, *, file_id: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1510,10 +1758,10 @@ def __init__( *, index: int, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1548,10 +1796,10 @@ def __init__( *, index: int, text: Optional["_models.MessageDeltaTextContentObject"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1581,10 +1829,10 @@ def __init__( *, value: Optional[str] = None, annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1634,10 +1882,10 @@ def __init__( text: Optional[str] = None, start_index: Optional[int] = None, end_index: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] @@ -1667,10 +1915,10 @@ def __init__( *, file_id: Optional[str] = None, quote: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1720,10 +1968,10 @@ def __init__( start_index: Optional[int] = None, end_index: Optional[int] = None, text: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1749,10 +1997,10 @@ def __init__( self, *, file_id: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1783,10 +2031,10 @@ def __init__( self, *, image_file: "_models.MessageImageFileDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1812,10 +2060,10 @@ def __init__( self, *, file_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1845,10 +2093,10 @@ def __init__( self, *, reason: Union[str, "_models.MessageIncompleteDetailsReason"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1883,10 +2131,10 @@ def __init__( *, type: str, text: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1916,10 +2164,10 @@ def __init__( self, *, text: "_models.MessageTextDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -1950,10 +2198,10 @@ def __init__( *, value: str, annotations: List["_models.MessageTextAnnotation"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2002,10 +2250,10 @@ def __init__( file_citation: "_models.MessageTextFileCitationDetails", start_index: Optional[int] = None, end_index: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2037,10 +2285,10 @@ def __init__( *, file_id: str, quote: str, - ): ... + ) -> None: ... 
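# --- Illustrative sketch; editorial addition, not part of the generated diff.
# The message content models above form a discriminated hierarchy:
# MessageTextContent carries MessageTextDetails, whose `annotations` list mixes
# file-citation and file-path annotations. A consumer typically narrows the
# union with isinstance checks; the helper below is hypothetical, and
# `content_items` stands in for the `content` list of a received thread message.

from typing import List

from azure.ai.client.models import (
    MessageTextContent,
    MessageTextFileCitationAnnotation,
)

def collect_cited_file_ids(content_items) -> List[str]:
    """Return the ids of files cited by the text parts of a message."""
    file_ids: List[str] = []
    for item in content_items:
        if isinstance(item, MessageTextContent):
            for annotation in item.text.annotations:
                if isinstance(annotation, MessageTextFileCitationAnnotation):
                    file_ids.append(annotation.file_citation.file_id)
    return file_ids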
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2086,10 +2334,10 @@ def __init__( file_path: "_models.MessageTextFilePathDetails", start_index: Optional[int] = None, end_index: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2115,10 +2363,10 @@ def __init__( self, *, file_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2128,6 +2376,35 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"): + """The input definition information for a Microsoft Fabric tool as used to configure an agent. + + + :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is + "microsoft_fabric". + :vartype type: str + """ + + type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'microsoft_fabric'. Required. Default value is + \"microsoft_fabric\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="microsoft_fabric", **kwargs) + + class OpenAIFile(_model_base.Model): """Represents an agent that can call the model and use tools. @@ -2189,10 +2466,10 @@ def __init__( purpose: Union[str, "_models.FilePurpose"], status: Optional[Union[str, "_models.FileState"]] = None, status_details: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2242,10 +2519,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2295,10 +2572,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2348,10 +2625,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2401,10 +2678,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... 
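# --- Illustrative sketch; editorial addition, not part of the generated diff.
# MicrosoftFabricToolDefinition (above) carries no payload beyond its `type`
# discriminator; the connection it should use is supplied separately through
# ToolResources (defined further down in this file). A sketch pairing the two,
# with a hypothetical connection id:

from azure.ai.client.models import (
    ConnectionListResource,
    ConnectionResource,
    MicrosoftFabricToolDefinition,
    ToolResources,
)

tools = [MicrosoftFabricToolDefinition()]  # serializes as {"type": "microsoft_fabric"}
resources = ToolResources(
    microsoft_fabric=ConnectionListResource(
        connection_list=[ConnectionResource(connection_id="my-fabric-connection")]
    )
)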
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2454,10 +2731,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2507,10 +2784,10 @@ def __init__( first_id: str, last_id: str, has_more: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2521,47 +2798,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["list"] = "list" -class Recurrence(_model_base.Model): - """Recurrence Definition. - - - :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", - "Week", "Day", "Hour", and "Minute". - :vartype frequency: str or ~azure.ai.client.models.Frequency - :ivar interval: Specifies schedule interval in conjunction with frequency. Required. - :vartype interval: int - :ivar schedule: The recurrence schedule. Required. - :vartype schedule: ~azure.ai.client.models.RecurrenceSchedule - """ - - frequency: Union[str, "_models.Frequency"] = rest_field() - """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", - \"Hour\", and \"Minute\".""" - interval: int = rest_field() - """Specifies schedule interval in conjunction with frequency. Required.""" - schedule: "_models.RecurrenceSchedule" = rest_field() - """The recurrence schedule. Required.""" - - @overload - def __init__( - self, - *, - frequency: Union[str, "_models.Frequency"], - interval: int, - schedule: "_models.RecurrenceSchedule", - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class RecurrenceSchedule(_model_base.Model): """RecurrenceSchedule Definition. @@ -2593,10 +2829,10 @@ def __init__( minutes: List[int], week_days: List[Union[str, "_models.WeekDays"]], month_days: List[int], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2606,6 +2842,53 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class RecurrenceTrigger(Trigger, discriminator="Recurrence"): + """Recurrence Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Recurrence". + :vartype type: str + :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", + "Week", "Day", "Hour", and "Minute". + :vartype frequency: str or ~azure.ai.client.models.Frequency + :ivar interval: Specifies schedule interval in conjunction with frequency. Required. + :vartype interval: int + :ivar schedule: The recurrence schedule. Required. 
+ :vartype schedule: ~azure.ai.client.models.RecurrenceSchedule + """ + + type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"Recurrence\".""" + frequency: Union[str, "_models.Frequency"] = rest_field() + """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", + \"Hour\", and \"Minute\".""" + interval: int = rest_field() + """Specifies schedule interval in conjunction with frequency. Required.""" + schedule: "_models.RecurrenceSchedule" = rest_field() + """The recurrence schedule. Required.""" + + @overload + def __init__( + self, + *, + frequency: Union[str, "_models.Frequency"], + interval: int, + schedule: "_models.RecurrenceSchedule", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="Recurrence", **kwargs) + + class RequiredAction(_model_base.Model): """An abstract representation of a required action for an agent thread run to continue. @@ -2626,10 +2909,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2665,10 +2948,10 @@ def __init__( *, type: str, id: str, # pylint: disable=redefined-builtin - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2707,10 +2990,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin function: "_models.RequiredFunctionToolCallDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2745,10 +3028,10 @@ def __init__( *, name: str, arguments: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2785,10 +3068,10 @@ def __init__( completion_tokens: int, prompt_tokens: int, total_tokens: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2819,10 +3102,10 @@ def __init__( *, code: str, message: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -2940,10 +3223,10 @@ def __init__( failed_at: datetime.datetime, metadata: Dict[str, str], usage: Optional["_models.RunStepCompletionUsage"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] @@ -2954,6 +3237,129 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["thread.run.step"] = "thread.run.step" +class RunStepToolCall(_model_base.Model): + """An abstract representation of a detailed tool call as recorded within a run step for an + existing run. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepAzureAISearchToolCall, RunStepBingSearchToolCall, RunStepCodeInterpreterToolCall, + RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, + RunStepSharepointToolCall + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + id: str = rest_field() + """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, **kwargs) + + +class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"): + """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined + tool, that represents + executed Azure AI search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + :ivar azure_ai_search: Reserved for future use. Required. + :vartype azure_ai_search: dict[str, str] + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + azure_ai_search: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + azure_ai_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class RunStepBingSearchToolCall(RunStepToolCall, discriminator="bing_search"): + """A record of a call to a bing search tool, issued by the model in evaluation of a defined tool, + that represents + executed bing search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_search'. Required. Default value is + "bing_search". + :vartype type: str + :ivar bing_search: Reserved for future use. Required. 
+ :vartype bing_search: dict[str, str] + """ + + type: Literal["bing_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_search'. Required. Default value is \"bing_search\".""" + bing_search: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="bing_search", **kwargs) + + class RunStepCodeInterpreterToolCallOutput(_model_base.Model): """An abstract representation of an emitted output from a code interpreter tool. @@ -2974,10 +3380,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3008,10 +3414,10 @@ def __init__( self, *, image: "_models.RunStepCodeInterpreterImageReference", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3037,10 +3443,10 @@ def __init__( self, *, file_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3071,10 +3477,10 @@ def __init__( self, *, logs: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3084,46 +3490,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="logs", **kwargs) -class RunStepToolCall(_model_base.Model): - """An abstract representation of a detailed tool call as recorded within a run step for an - existing run. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepCodeInterpreterToolCall, RunStepFileSearchToolCall, RunStepFunctionToolCall - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - id: str = rest_field() - """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" - - @overload - def __init__( - self, - *, - type: str, - id: str, # pylint: disable=redefined-builtin - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined tool, that @@ -3152,10 +3518,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3188,10 +3554,10 @@ def __init__( *, input: str, outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3228,10 +3594,10 @@ def __init__( completion_tokens: int, prompt_tokens: int, total_tokens: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3256,10 +3622,10 @@ def __init__( self, *, step_details: Optional["_models.RunStepDeltaDetail"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3298,10 +3664,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin delta: "_models.RunStepDelta", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3339,10 +3705,10 @@ def __init__( *, input: Optional[str] = None, outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3382,10 +3748,10 @@ def __init__( *, index: int, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3420,10 +3786,10 @@ def __init__( *, index: int, image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3448,10 +3814,10 @@ def __init__( self, *, file_id: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] @@ -3486,10 +3852,10 @@ def __init__( *, index: int, logs: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3533,10 +3899,10 @@ def __init__( index: int, id: str, # pylint: disable=redefined-builtin type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3574,10 +3940,10 @@ def __init__( index: int, id: str, # pylint: disable=redefined-builtin code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3607,10 +3973,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3647,10 +4013,10 @@ def __init__( index: int, id: str, # pylint: disable=redefined-builtin file_search: Optional[Dict[str, str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3685,10 +4051,10 @@ def __init__( name: Optional[str] = None, arguments: Optional[str] = None, output: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3725,10 +4091,10 @@ def __init__( index: int, id: str, # pylint: disable=redefined-builtin function: Optional["_models.RunStepDeltaFunction"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3760,10 +4126,10 @@ def __init__( self, *, message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3788,10 +4154,10 @@ def __init__( self, *, message_id: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3822,10 +4188,10 @@ def __init__( self, *, tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] @@ -3855,10 +4221,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3891,10 +4257,10 @@ def __init__( *, code: Union[str, "_models.RunStepErrorCode"], message: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3931,10 +4297,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin file_search: Dict[str, str], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -3970,10 +4336,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin function: "_models.RunStepFunctionToolCallDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4012,10 +4378,10 @@ def __init__( name: str, arguments: str, output: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4048,10 +4414,10 @@ def __init__( self, *, message_creation: "_models.RunStepMessageCreationReference", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4077,10 +4443,10 @@ def __init__( self, *, message_id: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4090,6 +4456,87 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"): + """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined + tool, that represents + executed Microsoft Fabric operations. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is + "microsoft_fabric". + :vartype type: str + :ivar microsoft_fabric: Reserved for future use. Required. + :vartype microsoft_fabric: dict[str, str] + """ + + type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'microsoft_fabric'. Required. Default value is + \"microsoft_fabric\".""" + microsoft_fabric: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + microsoft_fabric: Dict[str, str], + ) -> None: ... 
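# --- Illustrative sketch; editorial addition, not part of the generated diff.
# RunStepToolCall is the discriminated base for the per-tool call records in
# this file; deserialization selects the subclass from the wire `type` field,
# so consumers branch with isinstance checks. The Fabric, SharePoint, Bing, and
# Azure AI Search payloads are documented above as reserved for future use.
# The dispatch helper below is hypothetical.

from azure.ai.client.models import (
    RunStepFunctionToolCall,
    RunStepMicrosoftFabricToolCall,
    RunStepToolCall,
)

def describe_tool_call(call: RunStepToolCall) -> str:
    if isinstance(call, RunStepFunctionToolCall):
        return f"function {call.function.name}({call.function.arguments})"
    if isinstance(call, RunStepMicrosoftFabricToolCall):
        return f"fabric call {call.id} (payload reserved for future use)"
    return f"{call.type} call {call.id}"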
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="microsoft_fabric", **kwargs) + + +class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): + """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, + that represents + executed SharePoint actions. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'sharepoint'. Required. Default value is + "sharepoint". + :vartype type: str + :ivar share_point: Reserved for future use. Required. + :vartype share_point: dict[str, str] + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + share_point: Dict[str, str] = rest_field(name="sharepoint") + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + share_point: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="sharepoint", **kwargs) + + class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): """The detailed information associated with a run step calling tools. @@ -4112,10 +4559,10 @@ def __init__( self, *, tool_calls: List["_models.RunStepToolCall"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4141,10 +4588,10 @@ def __init__( self, *, rate: float, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4154,6 +4601,34 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, **kwargs) +class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): + """The input definition information for a sharepoint tool as used to configure an agent. + + + :ivar type: The object type, which is always 'sharepoint'. Required. Default value is + "sharepoint". + :vartype type: str + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation + super().__init__(*args, type="sharepoint", **kwargs) + + class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): """The details for required tool calls that must be submitted for an agent thread run to continue. @@ -4177,10 +4652,10 @@ def __init__( self, *, submit_tool_outputs: "_models.SubmitToolOutputsDetails", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4207,10 +4682,10 @@ def __init__( self, *, tool_calls: List["_models.RequiredToolCall"], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4276,10 +4751,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin deleted: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4388,10 +4863,10 @@ def __init__( run_id: str, attachments: List["_models.MessageAttachment"], metadata: Dict[str, str], - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4458,10 +4933,10 @@ def __init__( content: str, attachments: Optional[List["_models.MessageAttachment"]] = None, metadata: Optional[Dict[str, str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4657,10 +5132,10 @@ def __init__( # pylint: disable=too-many-locals top_p: Optional[float] = None, tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None, parallel_tool_calls: Optional[bool] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4694,10 +5169,10 @@ def __init__( *, tool_call_id: Optional[str] = None, output: Optional[str] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4720,12 +5195,32 @@ class ToolResources(_model_base.Model): :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store IDs. :vartype file_search: ~azure.ai.client.models.FileSearchToolResource + :ivar bing_search: Resources to be used by the ``bing_search`` tool consisting of connection + IDs. + :vartype bing_search: ~azure.ai.client.models.ConnectionListResource + :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of + connection IDs. 
+ :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource + :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection + IDs. + :vartype share_point: ~azure.ai.client.models.ConnectionListResource + :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index + IDs and names. + :vartype azure_ai_search: ~azure.ai.client.models.AzureAISearchResource """ code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field() """Resources to be used by the ``code_interpreter tool`` consisting of file IDs.""" file_search: Optional["_models.FileSearchToolResource"] = rest_field() """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" + bing_search: Optional["_models.ConnectionListResource"] = rest_field() + """Resources to be used by the ``bing_search`` tool consisting of connection IDs.""" + microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() + """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs.""" + share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") + """Resources to be used by the ``sharepoint`` tool consisting of connection IDs.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() + """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" @overload def __init__( @@ -4733,10 +5228,14 @@ def __init__( *, code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, file_search: Optional["_models.FileSearchToolResource"] = None, - ): ... + bing_search: Optional["_models.ConnectionListResource"] = None, + microsoft_fabric: Optional["_models.ConnectionListResource"] = None, + share_point: Optional["_models.ConnectionListResource"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4779,10 +5278,10 @@ def __init__( *, type: Union[str, "_models.TruncationStrategy"], last_messages: Optional[int] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4807,50 +5306,10 @@ def __init__( self, *, file_ids: Optional[List[str]] = None, - ): ... - - @overload - def __init__(self, mapping: Mapping[str, Any]): - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class UpdateEvaluationRequest(_model_base.Model): - """Update Evaluation Request. - - All required parameters must be populated in order to send to server. - - :ivar tags: Tags to be updated. Required. - :vartype tags: dict[str, str] - :ivar display_name: Display Name. Required. - :vartype display_name: str - :ivar description: Description. Required. - :vartype description: str - """ - - tags: Dict[str, str] = rest_field() - """Tags to be updated. Required.""" - display_name: str = rest_field(name="displayName") - """Display Name. Required.""" - description: str = rest_field() - """Description. 
Required.""" - - @overload - def __init__( - self, - *, - tags: Dict[str, str], - display_name: str, - description: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4875,10 +5334,10 @@ def __init__( self, *, vector_store_ids: Optional[List[str]] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -4902,6 +5361,18 @@ class UpdateToolResourcesOptions(_model_base.Model): :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent. :vartype file_search: ~azure.ai.client.models.UpdateFileSearchToolResourceOptions + :ivar bing_search: Overrides the list of connections to be used by the ``bing_search`` tool + consisting of connection IDs. + :vartype bing_search: ~azure.ai.client.models.ConnectionListResource + :ivar microsoft_fabric: Overrides the list of connections to be used by the + ``microsoft_fabric`` tool consisting of connection IDs. + :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource + :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool + consisting of connection IDs. + :vartype share_point: ~azure.ai.client.models.ConnectionListResource + :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool + consisting of index IDs and names. + :vartype azure_ai_search: ~azure.ai.client.models.AzureAISearchResource """ code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field() @@ -4911,6 +5382,18 @@ class UpdateToolResourcesOptions(_model_base.Model): file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" + bing_search: Optional["_models.ConnectionListResource"] = rest_field() + """Overrides the list of connections to be used by the ``bing_search`` tool consisting of + connection IDs.""" + microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() + """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of + connection IDs.""" + share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") + """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of + connection IDs.""" + azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() + """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and + names.""" @overload def __init__( @@ -4918,10 +5401,14 @@ def __init__( *, code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, - ): ... + bing_search: Optional["_models.ConnectionListResource"] = None, + microsoft_fabric: Optional["_models.ConnectionListResource"] = None, + share_point: Optional["_models.ConnectionListResource"] = None, + azure_ai_search: Optional["_models.AzureAISearchResource"] = None, + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5010,10 +5497,10 @@ def __init__( metadata: Dict[str, str], expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, expires_at: Optional[datetime.datetime] = None, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5045,10 +5532,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5074,10 +5561,10 @@ class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, @overload def __init__( self, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5107,10 +5594,10 @@ def __init__( self, *, type: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5135,10 +5622,10 @@ class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyRespons @overload def __init__( self, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5177,10 +5664,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin deleted: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5214,10 +5701,10 @@ def __init__( *, anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"], days: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5293,10 +5780,10 @@ def __init__( status: Union[str, "_models.VectorStoreFileStatus"], last_error: "_models.VectorStoreFileError", chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5357,10 +5844,10 @@ def __init__( vector_store_id: str, status: Union[str, "_models.VectorStoreFileBatchStatus"], file_counts: "_models.VectorStoreFileCount", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5407,10 +5894,10 @@ def __init__( failed: int, cancelled: int, total: int, - ): ... + ) -> None: ... 
@overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5449,10 +5936,10 @@ def __init__( *, id: str, # pylint: disable=redefined-builtin deleted: bool, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5486,10 +5973,10 @@ def __init__( *, code: Union[str, "_models.VectorStoreFileErrorCode"], message: str, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5525,10 +6012,10 @@ def __init__( *, max_chunk_size_tokens: int, chunk_overlap_tokens: int, - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5559,10 +6046,10 @@ def __init__( self, *, static: "_models.VectorStoreStaticChunkingStrategyOptions", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. :type mapping: Mapping[str, Any] @@ -5594,10 +6081,10 @@ def __init__( self, *, static: "_models.VectorStoreStaticChunkingStrategyOptions", - ): ... + ) -> None: ... @overload - def __init__(self, mapping: Mapping[str, Any]): + def __init__(self, mapping: Mapping[str, Any]) -> None: """ :param mapping: raw JSON to initialize the model. 
:type mapping: Mapping[str, Any] diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 2e0431cc862f..33c403ff5a33 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -662,11 +662,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: parsed_data = event_data # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if ( - event_type.startswith("thread.run.step") - and isinstance(parsed_data, dict) - and "expires_at" in parsed_data - ): + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: parsed_data["expired_at"] = parsed_data.pop("expires_at") # Map to the appropriate class instance diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index 8e6a46afb1f5..1a46c6d8abb9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -19,5 +19,5 @@ "EndpointsOperations", "EvaluationsOperations", ] -__all__.extend([p for p in _patch_all if p not in __all__]) +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 2d3adab9ec1f..de3e70aa578a 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -1210,20 +1210,44 @@ def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: +def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/evaluations/create" + _url = "/evaluations/runs/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("apiVersion", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs:run" + + # Construct parameters + _params["apiVersion"] = _SERIALIZER.query("api_version", api_version, "str") + # Construct headers if 
content_type is not None: _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") @@ -1242,7 +1266,7 @@ def build_evaluations_list_request( accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/evaluations" + _url = "/evaluations/runs" # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") @@ -1268,7 +1292,7 @@ def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/evaluations/{id}" + _url = "/evaluations/runs/{id}" path_format_arguments = { "id": _SERIALIZER.url("id", id, "str"), } @@ -1286,7 +1310,7 @@ def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: +def build_evaluations_get_schedule_request(id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1294,7 +1318,7 @@ def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/evaluations/{id}" + _url = "/evaluations/schedules/{id}" path_format_arguments = { "id": _SERIALIZER.url("id", id, "str"), } @@ -1310,7 +1334,9 @@ def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_create_schedule_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_evaluations_create_or_replace_schedule_request( # pylint: disable=name-too-long + id: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1318,27 +1344,6 @@ def build_evaluations_create_schedule_request(**kwargs: Any) -> HttpRequest: # api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") - # Construct URL - _url = "/evaluations/schedules/create" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_get_schedule_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - # Construct URL _url = "/evaluations/schedules/{id}" path_format_arguments = { @@ -1351,12 +1356,14 @@ def build_evaluations_get_schedule_request(id: str, **kwargs: Any) -> HttpReques _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = 
_SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_list_schedules_request( +def build_evaluations_list_schedule_request( *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1383,38 +1390,6 @@ def build_evaluations_list_schedules_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_list_schedule_evaluations_request( # pylint: disable=name-too-long - id: str, *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{id}/runs" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not None: - _params["skip"] = _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - def build_evaluations_delete_schedule_request(id: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -6527,13 +6502,83 @@ def __init__(self, *args, **kwargs): self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + @distributed_trace + def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + @overload def create( self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. :type evaluation: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -6545,9 +6590,9 @@ def create( @overload def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. :type evaluation: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". @@ -6561,9 +6606,9 @@ def create(self, evaluation: JSON, *, content_type: str = "application/json", ** def create( self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Required. + :param evaluation: Evaluation to run. Required. 
:type evaluation: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". @@ -6575,9 +6620,9 @@ def create( @distributed_trace def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Creates an evaluation. + """Run the evaluation. - :param evaluation: Properties of Evaluation. Is one of the following types: Evaluation, JSON, + :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, IO[bytes] Required. :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping @@ -6652,7 +6697,7 @@ def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwarg def list( self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> Iterable["_models.Evaluation"]: - """List evaluations. + """Resource list operation template. :keyword top: The number of result items to return. Default value is None. :paramtype top: int @@ -6754,19 +6799,19 @@ def get_next(next_link=None): def update( self, id: str, - update_request: _models.UpdateEvaluationRequest, + resource: _models.Evaluation, *, - content_type: str = "application/json", + content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. - :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest + :param resource: The resource instance. Required. + :type resource: ~azure.ai.client.models.Evaluation :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -6775,16 +6820,16 @@ def update( @overload def update( - self, id: str, update_request: JSON, *, content_type: str = "application/json", **kwargs: Any + self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. - :type update_request: JSON + :param resource: The resource instance. Required. + :type resource: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -6793,16 +6838,16 @@ def update( @overload def update( - self, id: str, update_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Required. 
- :type update_request: IO[bytes] + :param resource: The resource instance. Required. + :type resource: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". + Default value is "application/merge-patch+json". :paramtype content_type: str :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation @@ -6811,15 +6856,15 @@ def update( @distributed_trace def update( - self, id: str, update_request: Union[_models.UpdateEvaluationRequest, JSON, IO[bytes]], **kwargs: Any + self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any ) -> _models.Evaluation: - """Update an evaluation. + """Resource update operation template. :param id: Identifier of the evaluation. Required. :type id: str - :param update_request: Update evaluation request. Is one of the following types: - UpdateEvaluationRequest, JSON, IO[bytes] Required. - :type update_request: ~azure.ai.client.models.UpdateEvaluationRequest or JSON or IO[bytes] + :param resource: The resource instance. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type resource: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] :return: Evaluation. The Evaluation is compatible with MutableMapping :rtype: ~azure.ai.client.models.Evaluation :raises ~azure.core.exceptions.HttpResponseError: @@ -6838,12 +6883,12 @@ def update( content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - content_type = content_type or "application/json" + content_type = content_type or "application/merge-patch+json" _content = None - if isinstance(update_request, (IOBase, bytes)): - _content = update_request + if isinstance(resource, (IOBase, bytes)): + _content = resource else: - _content = json.dumps(update_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_evaluations_update_request( id=id, @@ -6879,24 +6924,29 @@ def update( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if _stream: deserialized = response.iter_bytes() else: deserialized = _deserialize(_models.Evaluation, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Get an evaluation. + def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. :param id: Identifier of the evaluation. Required. :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.client.models.EvaluationSchedule :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6910,9 +6960,9 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - _request = build_evaluations_get_request( + _request = build_evaluations_get_schedule_request( id=id, api_version=self._config.api_version, headers=_headers, @@ -6944,24 +6994,31 @@ def get(self, id: str, **kwargs: Any) -> _models.Evaluation: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.Evaluation, response.json()) + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @overload - def create_schedule( - self, body: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + def create_or_replace_schedule( + self, id: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. - :type body: ~azure.ai.client.models.EvaluationSchedule + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.client.models.EvaluationSchedule :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -6971,13 +7028,15 @@ def create_schedule( """ @overload - def create_schedule( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + def create_or_replace_schedule( + self, id: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. - :type body: JSON + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: JSON :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str @@ -6987,13 +7046,15 @@ def create_schedule( """ @overload - def create_schedule( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + def create_or_replace_schedule( + self, id: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Required. - :type body: IO[bytes] + :param id: Identifier of the evaluation. 
Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] :keyword content_type: Body Parameter content-type. Content type parameter for binary body. Default value is "application/json". :paramtype content_type: str @@ -7003,14 +7064,16 @@ def create_schedule( """ @distributed_trace - def create_schedule( - self, body: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + def create_or_replace_schedule( + self, id: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any ) -> _models.EvaluationSchedule: - """Creates an evaluation schedule. + """Create or replace operation template. - :param body: Properties of Evaluation Schedule. Is one of the following types: - EvaluationSchedule, JSON, IO[bytes] Required. - :type body: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping :rtype: ~azure.ai.client.models.EvaluationSchedule :raises ~azure.core.exceptions.HttpResponseError: @@ -7031,12 +7094,13 @@ def create_schedule( content_type = content_type or "application/json" _content = None - if isinstance(body, (IOBase, bytes)): - _content = body + if isinstance(resource, (IOBase, bytes)): + _content = resource else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_evaluations_create_schedule_request( + _request = build_evaluations_create_or_replace_schedule_request( + id=id, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -7060,7 +7124,7 @@ def create_schedule( response = pipeline_response.http_response - if response.status_code not in [201]: + if response.status_code not in [200, 201]: if _stream: try: response.read() # Load the body in memory and close the socket @@ -7069,86 +7133,26 @@ def create_schedule( map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Get an evaluation schedule along with runs. - - :param id: Identifier of the evaluation schedule. Required. - :type id: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - if _stream: deserialized = response.iter_bytes() else: deserialized = _deserialize(_models.EvaluationSchedule, response.json()) if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + return cls(pipeline_response, deserialized, response_headers) # type: ignore return deserialized # type: ignore @distributed_trace - def list_schedules( + def list_schedule( self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any ) -> Iterable["_models.EvaluationSchedule"]: - """List evaluation schedules. + """Resource list operation template. :keyword top: The number of result items to return. Default value is None. :paramtype top: int @@ -7175,7 +7179,7 @@ def list_schedules( def prepare_request(next_link=None): if not next_link: - _request = build_evaluations_list_schedules_request( + _request = build_evaluations_list_schedule_request( top=top, skip=skip, maxpagesize=maxpagesize, @@ -7246,116 +7250,11 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) - @distributed_trace - def list_schedule_evaluations( - self, id: str, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> Iterable["_models.Evaluation"]: - """List evaluations under a schedule. - - :param id: Identifier of the evaluation schedule. Required. - :type id: str - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. 
- :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_evaluations_request( - id=id, - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - @distributed_trace def delete_schedule(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Delete an evaluation schedule. + """Resource delete operation template. 
- :param id: Identifier of the evaluation schedule. Required. + :param id: Identifier of the evaluation. Required. :type id: str :return: None :rtype: None @@ -7401,5 +7300,10 @@ def delete_schedule(self, id: str, **kwargs: Any) -> None: # pylint: disable=in map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + if cls: - return cls(pipeline_response, None, {}) # type: ignore + return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 00d2d4740895..ddbfe9893234 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -143,9 +144,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" ) client = AzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION + api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, api_version=AZURE_OPENAI_API_VERSION ) elif endpoint.authentication_type == AuthenticationType.AAD: logger.debug( @@ -163,7 +162,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION + api_version=AZURE_OPENAI_API_VERSION, ) elif endpoint.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") @@ -172,7 +171,7 @@ def get_azure_openai_client(self) -> "AzureOpenAI": endpoint.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=endpoint.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION + api_version=AZURE_OPENAI_API_VERSION, ) else: raise ValueError("Unknown authentication type") @@ -790,7 +789,7 @@ def create_and_process_run( metadata=metadata, **kwargs, ) - + # Monitor and process the run status while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(sleep_interval) @@ -1084,12 +1083,11 @@ def create_stream( else: raise ValueError("Invalid combination of arguments provided.") - + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - @overload def submit_tool_outputs_to_run( self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any @@ -1477,7 +1475,13 @@ def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: @overload def upload_file_and_poll( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.OpenAIFile: """Uploads a file for use by other 
operations. @@ -1513,7 +1517,7 @@ def upload_file_and_poll( :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: - """ + """ @distributed_trace def upload_file_and_poll( @@ -1525,7 +1529,7 @@ def upload_file_and_poll( purpose: Optional[Union[str, _models.FilePurpose]] = None, filename: Optional[str] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.OpenAIFile: """ Uploads a file for use by other operations, delegating to the generated operations. @@ -1562,7 +1566,7 @@ def upload_file_and_poll( uploaded_file = self.get_file(uploaded_file.id) return uploaded_file - + @overload def create_vector_store_and_poll( self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any @@ -1592,8 +1596,8 @@ def create_vector_store_and_poll( expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStore: """Creates a vector store and poll. @@ -1654,7 +1658,7 @@ def create_vector_store_and_poll( chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStore: """Creates a vector store. @@ -1682,7 +1686,7 @@ def create_vector_store_and_poll( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - + if body is not None: vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs) elif file_ids is not None or (name is not None and expires_after is not None): @@ -1693,7 +1697,7 @@ def create_vector_store_and_poll( expires_after=expires_after, chunking_strategy=chunking_strategy, metadata=metadata, - **kwargs + **kwargs, ) else: raise ValueError( diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index 2e6a67c27dde..2fb06d8c9fdb 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -30,14 +30,13 @@ async def main(): - + # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Or, you can create the Azure AI Client by giving all required parameters directly @@ -50,18 +49,20 @@ async def main(): workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging
     )
-    """
-
+    """
+
     async with ai_client:
         agent = await ai_client.agents.create_agent(
             model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-            )
+        )
         print(f"Created agent, agent ID: {agent.id}")
-
+
         thread = await ai_client.agents.create_thread()
         print(f"Created thread, thread ID: {thread.id}")
 
-        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        message = await ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
         print(f"Created message, message ID: {message.id}")
 
         run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
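The basics sample above creates a run and then polls it by hand elsewhere in the file; the `_patch.py` layer earlier in this series also adds a `create_and_process_run` convenience that loops while the run is queued, in progress, or requires action. A minimal sketch of using it in place of the manual loop, reusing the sample's `ai_client`, `thread`, and `agent`; the keyword names mirror `create_run`, and the existence of an async counterpart is assumed rather than verified here:

# Sketch only: one call replaces create_run plus the manual status loop.
# Assumes the variables from the sample above are in scope.
run = await ai_client.agents.create_and_process_run(
    thread_id=thread.id,
    assistant_id=agent.id,
    sleep_interval=2,  # assumed keyword: seconds between get_run polls (default 1)
)
print(f"Run finished with status: {run.status}")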
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
index 3359c92c6918..33a631f5e503 100644
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
@@ -38,8 +38,7 @@ async def main():
     # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
     ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
+        credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
     )
 
     # Or, you can create the Azure AI Client by giving all required parameters directly
@@ -52,15 +51,18 @@ async def main():
         workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
         logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
     )
-    """
-
+    """
+
     async with ai_client:
         # Initialize assistant functions
         functions = AsyncFunctionTool(functions=user_async_functions)
 
         # Create agent
         agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", tools=functions.definitions
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=functions.definitions,
         )
         print(f"Created agent, agent ID: {agent.id}")
 
@@ -69,7 +71,9 @@ async def main():
         print(f"Created thread, ID: {thread.id}")
 
         # Create and send message
-        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?")
+        message = await ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, what's the time?"
+        )
         print(f"Created message, ID: {message.id}")
 
         # Create and run assistant task
@@ -81,7 +85,7 @@ async def main():
             time.sleep(4)
             run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
 
-            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): 
+            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
                 tool_calls = run.required_action.submit_tool_outputs.tool_calls
                 if not tool_calls:
                     print("No tool calls provided - cancelling run")
@@ -93,10 +97,12 @@ async def main():
                 if isinstance(tool_call, RequiredFunctionToolCall):
                     try:
                         output = await functions.execute(tool_call)
-                        tool_outputs.append({
-                            "tool_call_id": tool_call.id,
-                            "output": output,
-                        })
+                        tool_outputs.append(
+                            {
+                                "tool_call_id": tool_call.id,
+                                "output": output,
+                            }
+                        )
                     except Exception as e:
                         print(f"Error executing tool_call {tool_call.id}: {e}")
 
@@ -120,4 +126,4 @@ async def main():
 
 if __name__ == "__main__":
-    asyncio.run(main());
\ No newline at end of file
+    asyncio.run(main())
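The functions sample stops short of showing the hand-off of `tool_outputs` back to the run. A hedged sketch of that final step, continuing from the loop above; the keyword form with `tool_outputs=` is inferred from the `submit_tool_outputs_to_run` overloads patched earlier in this series and should be checked against the generated signatures:

# Sketch only: return the collected outputs so the run can leave
# the "requires_action" state and resume.
if tool_outputs:
    run = await ai_client.agents.submit_tool_outputs_to_run(
        thread_id=thread.id,
        run_id=run.id,
        tool_outputs=tool_outputs,  # assumed keyword; a raw JSON body is also accepted
    )
    print(f"Submitted tool outputs, run status: {run.status}")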
*" + cron_trigger = CronTrigger(expression=cron_expression) + evaluators = { + "f1_score": f1_evaluator_config, + "relevance": custom_relevance_evaluator_config + } + + sampling_strategy = SamplingStrategy(rate=0.2) + display_name = "Sample Online Evaluation Schedule" + description = "Sample Online Evaluation Schedule Description" + tags = {"tag1": "value1", "tag2": "value2"} + properties = {"property1": "value1", "property2": "value2"} + + evaluation_schedule = EvaluationSchedule( + data=app_insights_config, + evaluators=evaluators, + trigger=cron_trigger, + sampling_strategy=sampling_strategy, + display_name=display_name, + description=description, + tags=tags, + properties=properties + ) + + # Project Configuration + Subscription = "" + ResourceGroup = "" + Workspace = "" + Endpoint = "" + client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", + logging_enable=True + ) + client.evaluations + evaluation_schedule = client.evaluations.create_or_replace_schedule(id= "sample_schedule_id", resource=evaluation_schedule) + client.evaluations.get_schedule(evaluation_schedule.id) + client.evaluations.list_schedule() + client.evaluations.list() + client.evaluations.delete_schedule(evaluation_schedule.id) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/tests/conftest.py b/sdk/ai/azure-ai-client/tests/conftest.py index 6a5c0fe7070b..d944cdf86007 100644 --- a/sdk/ai/azure-ai-client/tests/conftest.py +++ b/sdk/ai/azure-ai-client/tests/conftest.py @@ -6,6 +6,7 @@ import pytest from devtools_testutils import test_proxy, remove_batch_sanitizers + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): @@ -17,4 +18,3 @@ def add_sanitizers(test_proxy): # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: # - AZSDK3493: $..name remove_batch_sanitizers(["AZSDK3493"]) - \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 283f9c571164..4cf4cd6906cc 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 6335d810e99108296eb601a9b7191b08b9368c29 +commit: 3ce2d0fb070fea5f0dd7ca7095eea280b1f81671 repo: Azure/azure-rest-api-specs additionalDirectories: From a0db3a8708630764d7d4342abfb0f9f102bc704c Mon Sep 17 00:00:00 2001 From: howieleung Date: Wed, 16 Oct 2024 09:56:45 -0700 Subject: [PATCH 030/138] Fixed poll while loop and sample (#37922) --- .../azure/ai/client/aio/operations/_patch.py | 2 +- .../azure-ai-client/azure/ai/client/operations/_patch.py | 2 +- .../agents/sample_agents_code_interpreter_attachment.py | 9 ++++++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index e1731fdf71d3..b919f922cdd8 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -1548,7 +1548,7 @@ async def upload_file_and_poll( "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
) - while uploaded_file.status in {"uploaded", "pending", "running"}: + while uploaded_file.status in ["uploaded", "pending", "running"]: time.sleep(sleep_interval) uploaded_file = await self.get_file(uploaded_file.id) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index ddbfe9893234..ea399c57535c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1561,7 +1561,7 @@ def upload_file_and_poll( "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." ) - while uploaded_file.status in {"uploaded", "pending", "running"}: + while uploaded_file.status in ["uploaded", "pending", "running"]: time.sleep(sleep_interval) uploaded_file = self.get_file(uploaded_file.id) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index d5a8760fd00a..aa93db85756b 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -24,8 +24,8 @@ import os, time from azure.ai.client import AzureAIClient from azure.ai.client.models import CodeInterpreterTool -from azure.ai.client.models._enums import FilePurpose -from azure.ai.client.models._models import MessageAttachment +from azure.ai.client.models import FilePurpose +from azure.ai.client.models import CodeInterpreterToolDefinition, MessageAttachment from azure.identity import DefaultAzureCredential @@ -60,7 +60,10 @@ # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to view the file agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=[CodeInterpreterToolDefinition()] ) print(f"Created agent, agent ID: {agent.id}") From 5e48c75d3ab1b070e5639d4372140d3c2f99b99b Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Wed, 16 Oct 2024 14:11:11 -0800 Subject: [PATCH 031/138] Jhakulin/azure ai client agents updates2 (#37937) * minor fixes and updates * update * update --- .../azure/ai/client/operations/_patch.py | 64 +++++++- ...mple_agents_code_interpreter_attachment.py | 7 +- .../agents/sample_agents_file_search.py | 2 +- ...ents_stream_eventhandler_with_functions.py | 140 ++++++++++++++++++ ...agents_stream_eventhandler_with_toolset.py | 4 +- .../agents/sample_agents_stream_iteration.py | 6 +- ...mple_agents_with_file_search_attachment.py | 6 +- 7 files changed, 214 insertions(+), 15 deletions(-) create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index ea399c57535c..4e654fd45d28 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -323,6 +323,64 @@ def create_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: 
Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.client.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -366,7 +424,8 @@ def create_agent( :param instructions: System instructions for the agent. :param tools: List of tools definitions for the agent. :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools (alternative to `tools` and `tool_resources`). + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). :param temperature: Sampling temperature for generating agent responses. :param top_p: Nucleus sampling parameter. :param response_format: Response format for tool calls. 
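
# A minimal usage sketch for the toolset-based create_agent overload documented above
# (an illustrative aside, not generated code; it assumes `user_functions` is a set of
# Python callables like the one shipped with the agent samples):
#
#     from azure.ai.client.models import FunctionTool, ToolSet
#
#     toolset = ToolSet()
#     toolset.add(FunctionTool(user_functions))
#     agent = ai_client.agents.create_agent(
#         model="gpt-4-1106-preview",
#         name="my-assistant",
#         instructions="You are a helpful assistant",
#         toolset=toolset,
#     )
#
# Because the toolset carries the function implementations, create_and_process_run and
# create_stream can then execute the requested tool calls automatically.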
@@ -1351,7 +1410,8 @@ def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Opt if toolset: tool_outputs = toolset.execute_tool_calls(tool_calls) else: - raise ValueError("Toolset is not available in the client.") + logger.warning("Toolset is not available in the client.") + return logger.info(f"Tool outputs: {tool_outputs}") if tool_outputs: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index aa93db85756b..794d53cd51c6 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -21,7 +21,7 @@ AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ -import os, time +import os from azure.ai.client import AzureAIClient from azure.ai.client.models import CodeInterpreterTool from azure.ai.client.models import FilePurpose @@ -52,7 +52,7 @@ with ai_client: # upload a file and wait for it to be processed - file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4) + file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") code_interpreter = CodeInterpreterTool() @@ -75,10 +75,9 @@ message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]) print(f"Created message, message ID: {message.id}") - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") - ai_client.agents.delete_file(file.id) print("Deleted file") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 7fc4d15278ff..edcb7ed3b5d8 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -67,7 +67,7 @@ toolset = ToolSet() toolset.add(file_search) - #Create agent with toolset and process assistant run + # Create agent with toolset and process assistant run agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", toolset=toolset ) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..9bc86af6bae4 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -0,0 +1,140 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_functions.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_functions.py
+
+    Before running the sample:
+
+    pip install azure.ai.client azure-identity
+
+    Set these environment variables with your own values:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.client import AzureAIClient
+from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.client.models import AgentEventHandler
+from azure.identity import DefaultAzureCredential
+from azure.ai.client.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
+
+from typing import Any
+
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+ai_client = AzureAIClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
+)
+
+# Or, you can create the Azure AI Client by giving all required parameters directly
+"""
+ai_client = AzureAIClient(
+    credential=DefaultAzureCredential(),
+    host_name=os.environ["AI_CLIENT_HOST_NAME"],
+    subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging
+)
+"""
+
+class MyEventHandler(AgentEventHandler):
+
+    def __init__(self, functions: FunctionTool) -> None:
+        self.functions = functions
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        output = self.functions.execute(tool_call)
+                        tool_outputs.append({
+                            "tool_call_id": tool_call.id,
+                            "output": output,
+                        })
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                with ai_client.agents.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id,
+                    run_id=run.id,
+                    tool_outputs=tool_outputs,
+                    event_handler=self
+                ) as stream:
+                    stream.until_done()
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with ai_client:
+    functions = FunctionTool(user_functions)
+
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", tools=functions.definitions
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.")
+    print(f"Created message, message ID: {message.id}")
+
+    with ai_client.agents.create_stream(
+        thread_id=thread.id,
+        assistant_id=agent.id,
+        event_handler=MyEventHandler(functions)
+    ) as stream:
+        stream.until_done()
+
+    ai_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = ai_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
index d294a1962c64..d8f023c56004 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
@@ -57,7 +57,8 @@
 )
 """
 
-
+# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream
+# method and the functions get called automatically by default.
 class MyEventHandler(AgentEventHandler):
@@ -88,7 +89,6 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
         print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
 
 
-
 with ai_client:
     functions = FunctionTool(user_functions)
     toolset = ToolSet()
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
index dc1036017b09..3d860038710a 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py
@@ -4,14 +4,14 @@
 # ------------------------------------
 
 """
-FILE: sample_agents_stream_iteration_with_toolset.py
+FILE: sample_agents_stream_iteration.py
 
 DESCRIPTION:
-    This sample demonstrates how to use agent operations with toolset in streaming from
+    This sample demonstrates how to use agent operations in streaming from
     the Azure Agents service using a synchronous client. 
USAGE: - python sample_agents_stream_iteration_with_toolset.py + python sample_agents_stream_iteration.py Before running the sample: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index 1edb7bd45410..aa536509e19b 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -53,12 +53,12 @@ with ai_client: # upload a file and wait for it to be processed - file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4) + file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") # create a vector store with the file and wait for it to be processed # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active - vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store", sleep_interval=4) + vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") file_search_tool = FileSearchTool() @@ -80,7 +80,7 @@ message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]) print(f"Created message, message ID: {message.id}") - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") ai_client.agents.delete_file(file.id) From 4b38a6fedd787135ca9c715b09c2645becd26f49 Mon Sep 17 00:00:00 2001 From: howieleung Date: Thu, 17 Oct 2024 16:21:41 -0700 Subject: [PATCH 032/138] create_vector_store_file_batch_and_poll + samples and update aio using the latest code from syn (#37952) * create_vector_store_file_batch_and_poll + samples and update aio using the latest code from sync * Resolved comments --- .../azure/ai/client/aio/operations/_patch.py | 178 +++++++++++++++++- .../azure/ai/client/models/_patch.py | 4 +- .../azure/ai/client/operations/_patch.py | 114 ++++++++++- ...ts_vector_store_batch_file_search_async.py | 103 ++++++++++ ...e_agents_vector_store_batch_file_search.py | 98 ++++++++++ 5 files changed, 491 insertions(+), 6 deletions(-) create mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py create mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index b919f922cdd8..0415b71cd31b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -308,6 +308,64 @@ async def create_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, 
+ toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.client.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode + or ~azure.ai.client.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.client.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + @overload async def create_agent( self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -353,7 +411,8 @@ async def create_agent( :param instructions: System instructions for the agent. :param tools: List of tools definitions for the agent. :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools (alternative to `tools` and `tool_resources`). + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). :param temperature: Sampling temperature for generating agent responses. :param top_p: Nucleus sampling parameter. :param response_format: Response format for tool calls. 
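
# The asynchronous client mirrors this overload; a minimal sketch under the same
# assumptions as the synchronous example above (awaitable client methods, and
# `user_functions` as defined in the samples):
#
#     from azure.ai.client.models import FunctionTool, ToolSet
#
#     toolset = ToolSet()
#     toolset.add(FunctionTool(user_functions))
#     agent = await ai_client.agents.create_agent(
#         model="gpt-4-1106-preview",
#         name="my-assistant",
#         instructions="You are a helpful assistant",
#         toolset=toolset,
#     )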
@@ -1338,7 +1397,8 @@ async def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handle if toolset: tool_outputs = await toolset.execute_tool_calls(tool_calls) else: - raise ValueError("Toolset is not available in the client.") + logger.warning("Toolset is not available in the client.") + return logger.info(f"Tool outputs: {tool_outputs}") if tool_outputs: @@ -1647,7 +1707,7 @@ async def create_vector_store_and_poll( sleep_interval: float = 1, **kwargs: Any ) -> _models.VectorStore: - """Creates a vector store. + """Creates a vector store and poll. :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] @@ -1698,6 +1758,118 @@ async def create_vector_store_and_poll( return vector_store + @overload + async def create_vector_store_file_batch_and_poll( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is None: + vector_store_file_batch = await super().create_vector_store_file_batch(vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs) + else: + content_type = kwargs.get("content_type", "application/json") + vector_store_file_batch = await super().create_vector_store_file_batch(body=body, content_type=content_type, **kwargs) + + while vector_store_file_batch.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file_batch = await super().get_vector_store_file_batch(vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id) + + return vector_store_file_batch + __all__: List[str] = [ "AgentsOperations", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 33c403ff5a33..fb3e6f883584 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -297,8 +297,8 @@ class FileSearchTool(Tool): A tool that searches for uploaded file information from the created vector stores. """ - def __init__(self): - self.vector_store_ids = [] + def __init__(self, vector_store_ids: List[str] = []): + self.vector_store_ids = vector_store_ids def add_vector_store(self, store_id: str): """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 4e654fd45d28..2ce53b5a8f95 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1720,7 +1720,7 @@ def create_vector_store_and_poll( sleep_interval: float = 1, **kwargs: Any, ) -> _models.VectorStore: - """Creates a vector store. + """Creates a vector store and poll. :param body: Is either a JSON type or a IO[bytes] type. Required. 
:type body: JSON or IO[bytes] @@ -1771,6 +1771,118 @@ def create_vector_store_and_poll( return vector_store + @overload + def create_vector_store_file_batch_and_poll( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.client.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is None: + vector_store_file_batch = super().create_vector_store_file_batch(vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs) + else: + content_type = kwargs.get("content_type", "application/json") + vector_store_file_batch = super().create_vector_store_file_batch(body=body, content_type=content_type, **kwargs) + + while vector_store_file_batch.status == "in_progress": + time.sleep(sleep_interval) + vector_store_file_batch = super().get_vector_store_file_batch(vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id) + + return vector_store_file_batch + __all__: List[str] = [ "AgentsOperations", diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py new file mode 100644 index 000000000000..c46370febe05 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -0,0 +1,103 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_vector_store_batch_file_search_async.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from + the Azure Agents service using a asynchronous client. + +USAGE: + python sample_agents_vector_store_batch_file_search_async.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set this environment variables with your own values: + AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import asyncio +import os +from azure.ai.client.aio import AzureAIClient +from azure.ai.client.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + + +async def main(): + # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+ # At the moment, it should be in the format ";;;" + # Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + ) + + # Or, you can create the Azure AI Client by giving all required parameters directly + """ + ai_client = AzureAIClient( + credential=DefaultAzureCredential(), + host_name=os.environ["AI_CLIENT_HOST_NAME"], + subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], + resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], + workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging + ) + """ + + async with ai_client: + + # upload a file and wait for it to be processed + file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # create a vector store with no file and wait for it to be processed + # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active + vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # add the file to the vector store + vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id]) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file + # also, you do not need to provide tool_resources if you did not create a vector store above + agent = await ai_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await ai_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?") + print(f"Created message, message ID: {message.id}") + + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + await ai_client.agents.delete_file(file.id) + print("Deleted file") + + await ai_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + await ai_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = await ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py new file mode 100644 index 000000000000..a17607733153 --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -0,0 +1,98 @@ 
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_vector_store_batch_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_file_search.py
+
+    Before running the sample:
+
+    pip install azure.ai.client azure-identity
+
+    Set these environment variables with your own values:
+    AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.client import AzureAIClient
+from azure.ai.client.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+ai_client = AzureAIClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
+)
+
+# Or, you can create the Azure AI Client by giving all required parameters directly
+"""
+ai_client = AzureAIClient(
+    credential=DefaultAzureCredential(),
+    host_name=os.environ["AI_CLIENT_HOST_NAME"],
+    subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
+    resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
+    workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
+    logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging
+)
+"""
+
+with ai_client:
+
+    # upload a file and wait for it to be processed
+    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # create a vector store with no file and wait for it to be processed
+    # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active
+    vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    # add the file to the vector store
+    vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id])
+    print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+    # create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # note that the file search tool definitions must be added, or the assistant will be unable to search the file
+    # also, you do not need to provide tool_resources if you did not create a vector store above
+    agent = ai_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = ai_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
+    print(f"Created message, message ID: 
{message.id}") + + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + ai_client.agents.delete_file(file.id) + print("Deleted file") + + ai_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + ai_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") From 0b2a410382d29c10843cb5232d27f13840eefa4b Mon Sep 17 00:00:00 2001 From: howieleung Date: Thu, 17 Oct 2024 16:28:47 -0700 Subject: [PATCH 033/138] Clean up on the Optional (#37971) --- .../azure/ai/client/aio/operations/_patch.py | 12 ++++++------ .../azure/ai/client/operations/_patch.py | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 0415b71cd31b..716b6fef4ecd 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -1456,11 +1456,11 @@ async def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _ @distributed_trace_async async def upload_file( self, - body: Union[JSON, None] = None, + body: Optional[JSON] = None, *, - file: Union[FileType, None] = None, + file: Optional[FileType] = None, file_path: Optional[str] = None, - purpose: Optional[Union[str, _models.FilePurpose]] = None, + purpose: Union[str, _models.FilePurpose, None] = None, filename: Optional[str] = None, **kwargs: Any, ) -> _models.OpenAIFile: @@ -1569,11 +1569,11 @@ async def upload_file_and_poll( @distributed_trace_async async def upload_file_and_poll( self, - body: Union[JSON, None] = None, + body: Optional[JSON] = None, *, - file: Union[FileType, None] = None, + file: Optional[FileType] = None, file_path: Optional[str] = None, - purpose: Optional[Union[str, _models.FilePurpose]] = None, + purpose: Union[str, _models.FilePurpose, None] = None, filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 2ce53b5a8f95..a015a9802026 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1469,11 +1469,11 @@ def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _models @distributed_trace def upload_file( self, - body: Union[JSON, None] = None, + body: Optional[JSON] = None, *, - file: Union[FileType, None] = None, + file: Optional[FileType] = None, file_path: Optional[str] = None, - purpose: Optional[Union[str, _models.FilePurpose]] = None, + purpose: Union[str, _models.FilePurpose, None] = None, filename: Optional[str] = None, **kwargs: Any, ) -> _models.OpenAIFile: @@ -1582,11 +1582,11 @@ def upload_file_and_poll( @distributed_trace def upload_file_and_poll( self, - body: Union[JSON, None] = None, + body: Optional[JSON] = None, *, - file: Union[FileType, None] = None, + file: Optional[FileType] = None, file_path: Optional[str] = None, - purpose: Optional[Union[str, _models.FilePurpose]] = None, + purpose: Union[str, _models.FilePurpose, None] = None, filename: Optional[str] = None, sleep_interval: float = 1, **kwargs: Any, From 306cb0e237efd1f02f40db9c8de5d1024242aee0 Mon Sep 17 00:00:00 2001 From: Darren Cohen 
<39422044+dargilco@users.noreply.github.com> Date: Fri, 18 Oct 2024 08:45:44 -0700 Subject: [PATCH 034/138] Switch from .endpoints to .connection (re-emit from latest TypeSpec) --- .../azure/ai/client/_client.py | 8 +- .../azure-ai-client/azure/ai/client/_patch.py | 4 +- .../azure/ai/client/aio/_client.py | 8 +- .../azure/ai/client/aio/_patch.py | 4 +- .../ai/client/aio/operations/__init__.py | 4 +- .../ai/client/aio/operations/_operations.py | 12 +- .../azure/ai/client/aio/operations/_patch.py | 140 +++++++------- .../azure/ai/client/models/__init__.py | 4 +- .../azure/ai/client/models/_enums.py | 20 +- .../azure/ai/client/models/_models.py | 16 +- .../azure/ai/client/models/_patch.py | 28 ++- .../azure/ai/client/operations/__init__.py | 4 +- .../azure/ai/client/operations/_operations.py | 12 +- .../azure/ai/client/operations/_patch.py | 144 +++++++------- .../sample_agents_basics_async.py | 12 -- .../sample_agents_functions_async.py | 12 -- ...sample_agents_stream_eventhandler_async.py | 25 +-- ..._stream_eventhandler_with_toolset_async.py | 32 +--- .../sample_agents_stream_iteration_async.py | 23 +-- ...gents_with_file_search_attachment_async.py | 55 +++--- .../async_samples/user_async_functions.py | 13 +- ...mple_agents_code_interpreter_attachment.py | 36 ++-- .../agents/sample_agents_file_search.py | 28 +-- .../samples/agents/sample_agents_functions.py | 17 +- .../sample_agents_stream_eventhandler.py | 1 + ...ents_stream_eventhandler_with_functions.py | 45 ++--- ...agents_stream_eventhandler_with_toolset.py | 25 +-- ...le_agents_stream_iteration_with_toolset.py | 19 +- ...mple_agents_with_file_search_attachment.py | 43 ++--- .../sample_connections_async.py} | 94 ++++----- .../samples/connections/sample_connections.py | 120 ++++++++++++ .../samples/endpoints/sample_endpoints.py | 114 ----------- .../sample_evaluations_schedules.py | 32 ++-- .../sample_get_embeddings_client_async.py | 1 - .../tests/agents/test_agents_client.py | 178 +++++++++++------- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 36 files changed, 636 insertions(+), 699 deletions(-) rename sdk/ai/azure-ai-client/samples/{endpoints/async_samples/sample_endpoints_async.py => connections/async_samples/sample_connections_async.py} (50%) create mode 100644 sdk/ai/azure-ai-client/samples/connections/sample_connections.py delete mode 100644 sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py index c697bfab91e5..cc8c0f6da8b1 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_client.py @@ -16,7 +16,7 @@ from ._configuration import AzureAIClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations if TYPE_CHECKING: from azure.core.credentials import TokenCredential @@ -27,8 +27,8 @@ class AzureAIClient: :ivar agents: AgentsOperations operations :vartype agents: azure.ai.client.operations.AgentsOperations - :ivar endpoints: EndpointsOperations operations - :vartype endpoints: azure.ai.client.operations.EndpointsOperations + :ivar connections: ConnectionsOperations operations + :vartype connections: azure.ai.client.operations.ConnectionsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.operations.EvaluationsOperations 
:param endpoint: The Azure AI Studio project endpoint, in the form @@ -92,7 +92,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index 1e2704985699..b0add1c9c19b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -13,7 +13,7 @@ from azure.core.pipeline import policies from ._configuration import AzureAIClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations from ._client import AzureAIClient as ClientGenerated from .operations._patch import InferenceOperations @@ -146,7 +146,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.endpoints = EndpointsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) self.inference = InferenceOperations(self) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py index 99f26fac57ca..64cba8c69b6b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py @@ -16,7 +16,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import AzureAIClientConfiguration -from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -27,8 +27,8 @@ class AzureAIClient: :ivar agents: AgentsOperations operations :vartype agents: azure.ai.client.aio.operations.AgentsOperations - :ivar endpoints: EndpointsOperations operations - :vartype endpoints: azure.ai.client.aio.operations.EndpointsOperations + :ivar connections: ConnectionsOperations operations + :vartype connections: azure.ai.client.aio.operations.ConnectionsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -92,7 +92,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize) + 
self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py index 06729aba0632..43bee04b4830 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py @@ -13,7 +13,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import AzureAIClientConfiguration -from .operations import AgentsOperations, EndpointsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations from ._client import AzureAIClient as ClientGenerated from .operations._patch import InferenceOperations @@ -146,7 +146,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.endpoints = EndpointsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) self.inference = InferenceOperations(self) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py index 1a46c6d8abb9..56224bae24a5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from ._operations import AgentsOperations -from ._operations import EndpointsOperations +from ._operations import ConnectionsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -16,7 +16,7 @@ __all__ = [ "AgentsOperations", - "EndpointsOperations", + "ConnectionsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index b9753729df22..989d32dfa108 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -73,8 +73,8 @@ build_agents_update_run_request, build_agents_update_thread_request, build_agents_upload_file_request, - build_endpoints_list_request, - build_endpoints_list_secrets_request, + build_connections_list_request, + build_connections_list_secrets_request, build_evaluations_create_or_replace_schedule_request, build_evaluations_create_request, build_evaluations_delete_schedule_request, @@ -4946,14 +4946,14 @@ async def list_vector_store_file_batch_files( return deserialized # type: ignore -class EndpointsOperations: +class ConnectionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.client.aio.AzureAIClient`'s - :attr:`endpoints` attribute. + :attr:`connections` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -4984,7 +4984,7 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - _request = build_endpoints_list_request( + _request = build_connections_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -5124,7 +5124,7 @@ async def _list_secrets( # pylint: disable=protected-access else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_endpoints_list_secrets_request( + _request = build_connections_list_secrets_request( connection_name_in_url=connection_name_in_url, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 716b6fef4ecd..ab68d88d4309 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -15,10 +16,10 @@ from typing import IO, Any, AsyncIterator, Dict, List, AsyncIterable, MutableMapping, Optional, Union, cast, overload from azure.ai.client import _types -from ._operations import EndpointsOperations as EndpointsOperationsGenerated +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated -from ...models._patch import EndpointProperties -from ...models._enums import AuthenticationType, EndpointType, FilePurpose +from ...models._patch import ConnectionProperties +from ...models._enums import AuthenticationType, ConnectionType, FilePurpose from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse from ... import models as _models from azure.core.tracing.decorator_async import distributed_trace_async @@ -35,11 +36,11 @@ def __init__(self, outer_instance): self.outer_instance = outer_instance async def get_chat_completions_client(self) -> "ChatCompletionsClient": - endpoint = await self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, populate_secrets=True ) - if not endpoint: - raise ValueError("No serverless endpoint found") + if not connection: + raise ValueError("No serverless connection found") try: from azure.ai.inference.aio import ChatCompletionsClient @@ -48,38 +49,38 @@ async def get_chat_completions_client(self) -> "ChatCompletionsClient": "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" ) - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + endpoint=connection.endpoint_url, credential=connection.token_credential ) - elif endpoint.authentication_type == AuthenticationType.SAS: + elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" ) - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) else: raise ValueError("Unknown authentication type") return client async def get_embeddings_client(self) -> "EmbeddingsClient": - endpoint = await self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, populate_secrets=True ) - if not endpoint: - raise ValueError("No serverless endpoint found") + if not connection: + raise ValueError("No serverless connection found") try: from azure.ai.inference.aio import EmbeddingsClient @@ -88,36 +89,36 @@ async def get_embeddings_client(self) -> "EmbeddingsClient": "Azure AI Inference SDK is not installed.
Please install it using 'pip install azure-ai-inference'" ) - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) - elif endpoint.authentication_type == AuthenticationType.SAS: + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) + elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) else: raise ValueError("Unknown authentication type") return client async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": - endpoint = await self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True ) - if not endpoint: - raise ValueError("No Azure OpenAI endpoint found.") + if not connection: + raise ValueError("No Azure OpenAI connection found.") try: from openai import AsyncAzureOpenAI @@ -128,14 +129,14 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs AZURE_OPENAI_API_VERSION = "2024-06-01" - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" ) client = AsyncAzureOpenAI( - api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, api_version=AZURE_OPENAI_API_VERSION + api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION ) - elif endpoint.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.AAD: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" ) @@ -148,18 +149,18 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + connection.token_credential,
"https://cognitiveservices.azure.com/.default" ), - azure_endpoint=endpoint.endpoint_url, + azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION, ) - elif endpoint.authentication_type == AuthenticationType.SAS: + elif connection.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") client = AsyncAzureOpenAI( azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + connection.token_credential, "https://cognitiveservices.azure.com/.default" ), - azure_endpoint=endpoint.endpoint_url, + azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION, ) else: @@ -168,30 +169,30 @@ async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": return client -class EndpointsOperations(EndpointsOperationsGenerated): +class ConnectionsOperations(ConnectionsOperationsGenerated): - async def get_default(self, *, endpoint_type: EndpointType, populate_secrets: bool = False) -> EndpointProperties: - if not endpoint_type: - raise ValueError("You must specify an endpoint type") + async def get_default(self, *, connection_type: ConnectionType, populate_secrets: bool = False) -> ConnectionProperties: + if not connection_type: + raise ValueError("You must specify an connection type") # Since there is no notion of service default at the moment, always return the first one - async for endpoint_properties in self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets): - return endpoint_properties + async for connection_properties in self.list(connection_type=connection_type, populate_secrets=populate_secrets): + return connection_properties return None - async def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> EndpointProperties: - if not endpoint_name: + async def get(self, *, connection_name: str, populate_secrets: bool = False) -> ConnectionProperties: + if not connection_name: raise ValueError("Endpoint name cannot be empty") if populate_secrets: connection: ConnectionsListSecretsResponse = await self._list_secrets( - connection_name_in_url=endpoint_name, - connection_name=endpoint_name, + connection_name_in_url=connection_name, + connection_name=connection_name, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, workspace_name=self._config.project_name, api_version_in_body=self._config.api_version, ) if connection.properties.auth_type == AuthenticationType.AAD: - return EndpointProperties(connection=connection, token_credential=self._config.credential) + return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ...models._patch import SASTokenCredential @@ -201,32 +202,32 @@ async def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> En subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, project_name=self._config.project_name, - connection_name=endpoint_name, + connection_name=connection_name, ) - return EndpointProperties(connection=connection, token_credential=token_credential) + return ConnectionProperties(connection=connection, token_credential=token_credential) - return EndpointProperties(connection=connection) + return ConnectionProperties(connection=connection) else: internal_response: ConnectionsListResponse = await self._list() for connection 
in internal_response.value: - if endpoint_name == connection.name: - return EndpointProperties(connection=connection) + if connection_name == connection.name: + return ConnectionProperties(connection=connection) return None async def list( - self, *, endpoint_type: EndpointType | None = None, populate_secrets: bool = False - ) -> AsyncIterable[EndpointProperties]: + self, *, connection_type: ConnectionType | None = None, populate_secrets: bool = False + ) -> AsyncIterable[ConnectionProperties]: # First make a REST call to /list to get all the connections, without secrets connections_list: ConnectionsListResponse = await self._list() # Filter by connection type for connection in connections_list.value: - if endpoint_type is None or connection.properties.category == endpoint_type: + if connection_type is None or connection.properties.category == connection_type: if not populate_secrets: - yield EndpointProperties(connection=connection) + yield ConnectionProperties(connection=connection) else: - yield await self.get(endpoint_name=connection.name, populate_secrets=True) + yield await self.get(connection_name=connection.name, populate_secrets=True) class AgentsOperations(AgentsOperationsGenerated): @@ -1129,7 +1130,7 @@ async def create_stream( else: raise ValueError("Invalid combination of arguments provided.") - + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) @@ -1385,8 +1386,10 @@ async def submit_tool_outputs_to_stream( response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - async def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None) -> None: + + async def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None + ) -> None: if isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -1403,12 +1406,9 @@ async def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handle logger.info(f"Tool outputs: {tool_outputs}") if tool_outputs: async with await self.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=event_handler - ) as stream: - await stream.until_done() + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler + ) as stream: + await stream.until_done() @overload async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @@ -1613,7 +1613,7 @@ async def upload_file_and_poll( uploaded_file = await self.get_file(uploaded_file.id) return uploaded_file - + @overload async def create_vector_store_and_poll( self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any @@ -1705,7 +1705,7 @@ async def create_vector_store_and_poll( chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStore: """Creates a vector store and poll. 
@@ -1733,7 +1733,7 @@ async def create_vector_store_and_poll( :rtype: ~azure.ai.client.models.VectorStore :raises ~azure.core.exceptions.HttpResponseError: """ - + if body is not None: vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs) elif file_ids is not None or (name is not None and expires_after is not None): @@ -1744,7 +1744,7 @@ async def create_vector_store_and_poll( expires_after=expires_after, chunking_strategy=chunking_strategy, metadata=metadata, - **kwargs + **kwargs, ) else: raise ValueError( @@ -1873,7 +1873,7 @@ async def create_vector_store_file_batch_and_poll( __all__: List[str] = [ "AgentsOperations", - "EndpointsOperations", + "ConnectionsOperations", "InferenceOperations", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index aac7dc898b4b..9678b219f046 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -152,8 +152,8 @@ from ._enums import AgentsNamedToolChoiceType from ._enums import ApiResponseFormat from ._enums import AuthenticationType +from ._enums import ConnectionType from ._enums import DoneEvent -from ._enums import EndpointType from ._enums import ErrorEvent from ._enums import FilePurpose from ._enums import FileState @@ -331,8 +331,8 @@ "AgentsNamedToolChoiceType", "ApiResponseFormat", "AuthenticationType", + "ConnectionType", "DoneEvent", - "EndpointType", "ErrorEvent", "FilePurpose", "FileState", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index 8dc539649db7..c48d08daeb5b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -95,7 +95,7 @@ class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type RunStep""" THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run stepis being streamed. The data of this event is of type + """Event sent when a run step is being streamed. The data of this event is of type RunStepDeltaChunk""" THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" """Event sent when a run step is completed. 
The data of this event is of type RunStep""" @@ -143,14 +143,7 @@ class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Shared Access Signature (SAS) authentication""" -class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Terminal event indicating the successful end of a stream.""" - - DONE = "done" - """Event sent when the stream is done.""" - - -class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The Type (or category) of the connection.""" AZURE_OPEN_AI = "AzureOpenAI" @@ -159,6 +152,13 @@ class EndpointType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Serverless API service""" +class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating the successful end of a stream.""" + + DONE = "done" + """Event sent when the stream is done.""" + + class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Terminal event indicating a server side error while streaming.""" @@ -349,7 +349,7 @@ class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type RunStep""" THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run stepis being streamed. The data of this event is of type + """Event sent when a run step is being streamed. The data of this event is of type RunStepDeltaChunk""" THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" """Event sent when a run step is completed. The data of this event is of type RunStep""" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index d7d0c49738f3..248f6023dc00 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -662,7 +662,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles class ConnectionProperties(_model_base.Model): - """Connetion properties. + """Connection properties. You probably want to use the sub-classes and not this class directly. Known sub-classes are: ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth @@ -689,14 +689,14 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): :vartype auth_type: str or ~azure.ai.client.models.AAD :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". - :vartype category: str or ~azure.ai.client.models.EndpointType + :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str """ auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models.EndpointType"] = rest_field() + category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" target: str = rest_field() """The connection URL to be used for this service. Required.""" @@ -710,7 +710,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey :vartype auth_type: str or ~azure.ai.client.models.API_KEY :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". 
- :vartype category: str or ~azure.ai.client.models.EndpointType + :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models._models.CredentialsApiKeyAuth :ivar target: The connection URL to be used for this service. Required. @@ -719,7 +719,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.EndpointType"] = rest_field() + category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" @@ -736,7 +736,7 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): :vartype auth_type: str or ~azure.ai.client.models.SAS :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and "Serverless". - :vartype category: str or ~azure.ai.client.models.EndpointType + :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models._models.CredentialsSASAuth :ivar target: The connection URL to be used for this service. Required. @@ -746,7 +746,7 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" - category: Union[str, "_models.EndpointType"] = rest_field() + category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" @@ -824,7 +824,7 @@ class CredentialsApiKeyAuth(_model_base.Model): class CredentialsSASAuth(_model_base.Model): - """The credentials neede for Shared Access Signatures (SAS) authentication. + """The credentials needed for Shared Access Signatures (SAS) authentication. :ivar sas: The Shared Access Signatures (SAS) token. Required. 
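The hand-written convenience layer below completes the endpoints-to-connections rename. A minimal usage sketch of the renamed surface, not a fixed contract: it assumes the AI_CLIENT_CONNECTION_STRING environment-variable convention and the import paths used by the samples later in this patch.

# Sketch only: exercises ConnectionsOperations as defined in this patch.
import os

from azure.ai.client import AzureAIClient
from azure.ai.client.models import ConnectionType
from azure.identity import DefaultAzureCredential

ai_client = AzureAIClient.from_connection_string(
    credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
)

# List connections without secrets, optionally filtered by type.
for props in ai_client.connections.list(connection_type=ConnectionType.SERVERLESS):
    print(props.name, props.connection_type)

# get_default returns the first matching connection; populate_secrets=True makes the
# client call the list-secrets route so props.key or a token credential is populated.
props = ai_client.connections.get_default(connection_type=ConnectionType.SERVERLESS, populate_secrets=True)
print(props)  # ConnectionProperties.__str__ prints name, auth type, endpoint URL and key
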
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index fb3e6f883584..b845f535cfec 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -41,12 +41,12 @@ logger = logging.getLogger(__name__) -class EndpointProperties: +class ConnectionProperties: def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: self.name = connection.name self.authentication_type = connection.properties.auth_type - self.endpoint_type = connection.properties.category + self.connection_type = connection.properties.category self.endpoint_url = ( connection.properties.target[:-1] if connection.properties.target.endswith("/") @@ -62,7 +62,7 @@ def __str__(self): out = "{\n" out += f' "name": "{self.name}",\n' out += f' "authentication_type": "{self.authentication_type}",\n' - out += f' "endpoint_type": "{self.endpoint_type}",\n' + out += f' "connection_type": "{self.connection_type}",\n' out += f' "endpoint_url": "{self.endpoint_url}",\n' out += f' "key": "{self.key}",\n' if self.token_credential: @@ -116,7 +116,7 @@ def _refresh_token(self) -> None: project_name=self._project_name, ) - connection = ai_client.endpoints.get(endpoint_name=self._connection_name, populate_secrets=True) + connection = ai_client.connections.get(connection_name=self._connection_name, populate_secrets=True) self._sas_token = connection.properties.credentials.sas self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) @@ -605,13 +605,13 @@ def __init__( self, response_iterator: AsyncIterator[bytes], submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], - event_handler: Optional['AsyncAgentEventHandler'] = None, + event_handler: Optional["AsyncAgentEventHandler"] = None, ): self.response_iterator = response_iterator self.event_handler = event_handler self.done = False self.buffer = "" - self.submit_tool_outputs = submit_tool_outputs + self.submit_tool_outputs = submit_tool_outputs async def __aenter__(self): return self @@ -706,8 +706,12 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: event_type, event_data_obj = self._parse_event_data(event_data_str) - if isinstance(event_data_obj, ThreadRun) and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction): - await self.submit_tool_outputs(event_data_obj, self.event_handler) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + await self.submit_tool_outputs(event_data_obj, self.event_handler) if self.event_handler: try: if isinstance(event_data_obj, MessageDeltaChunk): @@ -849,8 +853,12 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: def _process_event(self, event_data_str: str) -> Tuple[str, Any]: event_type, event_data_obj = self._parse_event_data(event_data_str) - if isinstance(event_data_obj, ThreadRun) and event_data_obj.status == "requires_action" and isinstance(event_data_obj.required_action, SubmitToolOutputsAction): - self.submit_tool_outputs(event_data_obj, self.event_handler) + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and
isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + self.submit_tool_outputs(event_data_obj, self.event_handler) if self.event_handler: try: if isinstance(event_data_obj, MessageDeltaChunk): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py index 1a46c6d8abb9..56224bae24a5 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- from ._operations import AgentsOperations -from ._operations import EndpointsOperations +from ._operations import ConnectionsOperations from ._operations import EvaluationsOperations from ._patch import __all__ as _patch_all @@ -16,7 +16,7 @@ __all__ = [ "AgentsOperations", - "EndpointsOperations", + "ConnectionsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index de3e70aa578a..5f738c262c38 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -1164,7 +1164,7 @@ def build_agents_list_vector_store_file_batch_files_request( # pylint: disable= return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: +def build_connections_list_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1183,7 +1183,7 @@ def build_endpoints_list_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_endpoints_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: +def build_connections_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -6260,14 +6260,14 @@ def list_vector_store_file_batch_files( return deserialized # type: ignore -class EndpointsOperations: +class ConnectionsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.client.AzureAIClient`'s - :attr:`endpoints` attribute. + :attr:`connections` attribute. 
""" def __init__(self, *args, **kwargs): @@ -6298,7 +6298,7 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - _request = build_endpoints_list_request( + _request = build_connections_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -6438,7 +6438,7 @@ def _list_secrets( # pylint: disable=protected-access else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_endpoints_list_secrets_request( + _request = build_connections_list_secrets_request( connection_name_in_url=connection_name_in_url, content_type=content_type, api_version=self._config.api_version, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index a015a9802026..b64000ce6e40 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1,5 +1,6 @@ # pylint: disable=too-many-lines # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -13,12 +14,12 @@ from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast # from zoneinfo import ZoneInfo -from ._operations import EndpointsOperations as EndpointsOperationsGenerated +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated -from ..models._enums import AuthenticationType, EndpointType +from ..models._enums import AuthenticationType, ConnectionType from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse from .._types import AgentsApiResponseFormatOption -from ..models._patch import EndpointProperties +from ..models._patch import ConnectionProperties from ..models._enums import FilePurpose from .._vendor import FileType from .. import models as _models @@ -46,11 +47,11 @@ def __init__(self, outer_instance): self.outer_instance = outer_instance def get_chat_completions_client(self) -> "ChatCompletionsClient": - endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + connection = self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, populate_secrets=True ) - if not endpoint: - raise ValueError("No serverless endpoint found") + if not connection: + raise ValueError("No serverless connection found") try: from azure.ai.inference import ChatCompletionsClient @@ -59,38 +60,38 @@ def get_chat_completions_client(self) -> "ChatCompletionsClient": "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" ) - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + endpoint=connection.endpoint_url, credential=connection.token_credential ) - elif endpoint.authentication_type == AuthenticationType.SAS: + elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" ) - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) else: raise ValueError("Unknown authentication type") return client def get_embeddings_client(self) -> "EmbeddingsClient": - endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.SERVERLESS, populate_secrets=True + connection = self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, populate_secrets=True ) - if not endpoint: - raise ValueError("No serverless endpoint found") + if not connection: + raise ValueError("No serverless connection found") try: from azure.ai.inference import EmbeddingsClient @@ -99,36 +100,36 @@ def get_embeddings_client(self) -> "EmbeddingsClient": "Azure AI Inference SDK is not installed.
Please install it using 'pip install azure-ai-inference'" ) - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) - elif endpoint.authentication_type == AuthenticationType.SAS: + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) + elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) - client = EmbeddingsClient(endpoint=endpoint.endpoint_url, credential=endpoint.token_credential) + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) else: raise ValueError("Unknown authentication type") return client def get_azure_openai_client(self) -> "AzureOpenAI": - endpoint = self.outer_instance.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True + connection = self.outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True ) - if not endpoint: - raise ValueError("No Azure OpenAI endpoint found") + if not connection: + raise ValueError("No Azure OpenAI connection found") try: from openai import AzureOpenAI @@ -139,14 +140,14 @@ def get_azure_openai_client(self) -> "AzureOpenAI": # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs AZURE_OPENAI_API_VERSION = "2024-06-01" - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" ) client = AzureOpenAI( - api_key=endpoint.key, azure_endpoint=endpoint.endpoint_url, api_version=AZURE_OPENAI_API_VERSION + api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION ) - elif endpoint.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.AAD: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" ) @@ -159,18 +160,18 @@ def get_azure_openai_client(self) -> "AzureOpenAI": client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + connection.token_credential, "https://cognitiveservices.azure.com/.default" ), - azure_endpoint=endpoint.endpoint_url, +
azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION, ) - elif endpoint.authentication_type == AuthenticationType.SAS: + elif connection.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") client = AzureOpenAI( azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + connection.token_credential, "https://cognitiveservices.azure.com/.default" ), - azure_endpoint=endpoint.endpoint_url, + azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION, ) else: @@ -179,32 +180,32 @@ def get_azure_openai_client(self) -> "AzureOpenAI": return client -class EndpointsOperations(EndpointsOperationsGenerated): +class ConnectionsOperations(ConnectionsOperationsGenerated): - def get_default(self, *, endpoint_type: EndpointType, populate_secrets: bool = False) -> EndpointProperties: - if not endpoint_type: - raise ValueError("You must specify an endpoint type") - endpoint_properties_list = self.list(endpoint_type=endpoint_type, populate_secrets=populate_secrets) + def get_default(self, *, connection_type: ConnectionType, populate_secrets: bool = False) -> ConnectionProperties: + if not connection_type: + raise ValueError("You must specify a connection type") + connection_properties_list = self.list(connection_type=connection_type, populate_secrets=populate_secrets) # Since there is no notion of service default at the moment, always return the first one - if len(endpoint_properties_list) > 0: - return endpoint_properties_list[0] + if len(connection_properties_list) > 0: + return connection_properties_list[0] else: return None - def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> EndpointProperties: - if not endpoint_name: - raise ValueError("Endpoint name cannot be empty") + def get(self, *, connection_name: str, populate_secrets: bool = False) -> ConnectionProperties: + if not connection_name: + raise ValueError("Connection name cannot be empty") if populate_secrets: connection: ConnectionsListSecretsResponse = self._list_secrets( - connection_name_in_url=endpoint_name, - connection_name=endpoint_name, + connection_name_in_url=connection_name, + connection_name=connection_name, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, workspace_name=self._config.project_name, api_version_in_body=self._config.api_version, ) if connection.properties.auth_type == AuthenticationType.AAD: - return EndpointProperties(connection=connection, token_credential=self._config.credential) + return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ..models._patch import SASTokenCredential @@ -214,35 +215,35 @@ def get(self, *, endpoint_name: str, populate_secrets: bool = False) -> Endpoint subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, project_name=self._config.project_name, - connection_name=endpoint_name, + connection_name=connection_name, ) - return EndpointProperties(connection=connection, token_credential=token_credential) + return ConnectionProperties(connection=connection, token_credential=token_credential) - return EndpointProperties(connection=connection) + return ConnectionProperties(connection=connection) else: internal_response: ConnectionsListResponse = self._list() for
connection in internal_response.value: - if endpoint_name == connection.name: - return EndpointProperties(connection=connection) + if connection_name == connection.name: + return ConnectionProperties(connection=connection) return None def list( - self, *, endpoint_type: EndpointType | None = None, populate_secrets: bool = False - ) -> Iterable[EndpointProperties]: + self, *, connection_type: ConnectionType | None = None, populate_secrets: bool = False + ) -> Iterable[ConnectionProperties]: # First make a REST call to /list to get all the connections, without secrets connections_list: ConnectionsListResponse = self._list() - endpoint_properties_list: List[EndpointProperties] = [] + connection_properties_list: List[ConnectionProperties] = [] # Filter by connection type for connection in connections_list.value: - if endpoint_type is None or connection.properties.category == endpoint_type: + if connection_type is None or connection.properties.category == connection_type: if not populate_secrets: - endpoint_properties_list.append(EndpointProperties(connection=connection)) + connection_properties_list.append(ConnectionProperties(connection=connection)) else: - endpoint_properties_list.append(self.get(endpoint_name=connection.name, populate_secrets=True)) + connection_properties_list.append(self.get(connection_name=connection.name, populate_secrets=True)) - return endpoint_properties_list + return connection_properties_list class AgentsOperations(AgentsOperationsGenerated): @@ -352,7 +353,7 @@ def create_agent( :paramtype description: str :keyword instructions: The system instructions for the new agent to use. Default value is None. :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. :paramtype toolset: ~azure.ai.client.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 @@ -424,7 +425,7 @@ def create_agent( :param instructions: System instructions for the agent. :param tools: List of tools definitions for the agent. :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). :param temperature: Sampling temperature for generating agent responses. :param top_p: Nucleus sampling parameter.
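The `toolset` keyword documented above is what the `_handle_submit_tool_outputs` hunk that follows relies on: when a streamed run reaches `requires_action`, the registered functions are executed and their outputs re-submitted automatically. A condensed sketch of that flow, mirroring the async toolset sample later in this patch; the `user_async_functions` import path and the async-credential import are assumptions taken from the sample layout, not part of this diff.

# Condensed sketch of automatic tool execution during streaming.
import asyncio
import os

from azure.ai.client.aio import AzureAIClient
from azure.ai.client.models import AsyncFunctionTool, AsyncToolSet
from azure.identity.aio import DefaultAzureCredential  # async credential, assumed

from user_async_functions import user_async_functions  # sample helper module, assumed path


async def main():
    # Register user functions so tool calls can be executed client-side.
    toolset = AsyncToolSet()
    toolset.add(AsyncFunctionTool(user_async_functions))

    ai_client = AzureAIClient.from_connection_string(
        credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
    )
    async with ai_client:
        agent = await ai_client.agents.create_agent(
            model="gpt-4-1106-preview",
            name="my-assistant",
            instructions="You are a helpful assistant",
            toolset=toolset,
        )
        thread = await ai_client.agents.create_thread()
        await ai_client.agents.create_message(
            thread_id=thread.id, role="user", content="What is the weather in New York?"
        )
        # Any requires_action events are resolved by _handle_submit_tool_outputs.
        async with await ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
            await stream.until_done()

        await ai_client.agents.delete_agent(agent.id)


asyncio.run(main())
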
@@ -1398,8 +1399,10 @@ def submit_tool_outputs_to_stream( response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None) -> None: + + def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None + ) -> None: if isinstance(run.required_action, _models.SubmitToolOutputsAction): tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -1412,16 +1415,13 @@ def _handle_submit_tool_outputs(self, run: _models.ThreadRun, event_handler: Opt else: logger.warning("Toolset is not available in the client.") return - + logger.info(f"Tool outputs: {tool_outputs}") if tool_outputs: with self.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=event_handler - ) as stream: - stream.until_done() + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler + ) as stream: + stream.until_done() @overload def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: @@ -1886,7 +1886,7 @@ def create_vector_store_file_batch_and_poll( __all__: List[str] = [ "AgentsOperations", - "EndpointsOperations", + "ConnectionsOperations", "InferenceOperations", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index 2fb06d8c9fdb..94eae399cf63 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -39,18 +39,6 @@ async def main(): credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging - ) - """ - async with ai_client: agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index 33a631f5e503..58d7872e1e16 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -41,18 +41,6 @@ async def main(): credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging - ) - """ - async with ai_client: # Initialize assistant functions functions = AsyncFunctionTool(functions=user_async_functions) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 95f9cde57391..fbd5f1cb4977 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -63,23 +63,10 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging - ) - """ - - async with ai_client: + async with ai_client: agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) @@ -88,13 +75,13 @@ async def main(): thread = await ai_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID {message.id}") async with await ai_client.agents.create_stream( - thread_id=thread.id, - assistant_id=agent.id, - event_handler=MyEventHandler() + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 9be377687e31..1f6be096c705 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -70,45 +70,33 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging - ) - """ - # Initialize toolset with user functions functions = AsyncFunctionTool(user_async_functions) toolset = AsyncToolSet() toolset.add(functions) - - async with ai_client: - + + async with ai_client: + agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", toolset=toolset ) print(f"Created agent, agent ID: {agent.id}") - thread = await ai_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details") + message = await ai_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details", + ) print(f"Created message, message ID {message.id}") async with await ai_client.agents.create_stream( - thread_id=thread.id, - assistant_id=agent.id, - event_handler=MyEventHandler() + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: await stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index de5c64672b96..989c565c43f6 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -36,32 +36,21 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging - ) - """ - - async with ai_client: + async with ai_client: agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) print(f"Created agent, agent ID: {agent.id}") - + thread = await ai_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID {message.id}") async with await ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 790a6117b871..25342eb89930 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -36,54 +36,51 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) - # Or, you can create the Azure AI Client by giving all required parameters directly - """ - ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - 
workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging - ) - """ - # upload a file and wait for it to be processed async with ai_client: - file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4) + file = await ai_client.agents.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4 + ) # create a vector store with the file and wait for it to be processed - # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active - vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store", sleep_interval=4) - + # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active + vector_store = await ai_client.agents.create_vector_store_and_poll( + file_ids=[file.id], name="sample_vector_store", sleep_interval=4 + ) + file_search_tool = FileSearchToolDefinition() - + # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file # also, you do not need to provide tool_resources if you did not create a vector store above agent = await ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", tools=[file_search_tool], - tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])) + tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])), ) print(f"Created agent, agent ID: {agent.id}") - + thread = await ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") + print(f"Created thread, thread ID: {thread.id}") # create a message with the attachment attachment = MessageAttachment(file_id=file.id, tools=[file_search_tool]) - message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]) + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) print(f"Created message, message ID: {message.id}") - run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id, sleep_interval=4) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id, sleep_interval=4 + ) print(f"Created run, run ID: {run.id}") print(f"Run completed with status: {run.status}") - + await ai_client.agents.delete_file(file.id) print("Deleted file") @@ -92,9 +89,9 @@ async def main(): await ai_client.agents.delete_agent(agent.id) print("Deleted agent") - - messages = await ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") + + messages = await ai_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py index 66843dc43fe0..4931352e03c6 100644 --- 
a/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py @@ -10,19 +10,20 @@ # Add parent directory to sys.path to import user_functions current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.abspath(os.path.join(current_dir, '..')) +parent_dir = os.path.abspath(os.path.join(current_dir, "..")) if parent_dir not in sys.path: sys.path.insert(0, parent_dir) from user_functions import fetch_current_datetime, fetch_weather, send_email - + async def send_email_async(recipient: str, subject: str, body: str) -> str: - await asyncio.sleep(1) + await asyncio.sleep(1) return send_email(recipient, subject, body) # Statically defined user functions for fast reference with send_email as async but the rest as sync -user_async_functions = {"fetch_current_datetime": fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email_async +user_async_functions = { + "fetch_current_datetime": fetch_current_datetime, + "fetch_weather": fetch_weather, + "send_email": send_email_async, } diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index 794d53cd51c6..bd4acf9b7fc4 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -34,50 +34,39 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging -) -""" - with ai_client: # upload a file and wait for it to be processed file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") - + code_interpreter = CodeInterpreterTool() code_interpreter.add_file(file.id) - + # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to view the file agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - tools=[CodeInterpreterToolDefinition()] + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=[CodeInterpreterToolDefinition()], ) print(f"Created agent, agent ID: {agent.id}") thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") + print(f"Created thread, thread ID: {thread.id}") # create a message with the attachment attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]) + message = ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) print(f"Created message, message ID: {message.id}") run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") - + ai_client.agents.delete_file(file.id) print("Deleted file") @@ -86,4 +75,3 @@ messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") - diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index edcb7ed3b5d8..93abf3542dfd 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -37,31 +37,18 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging -) -""" - with ai_client: # Create file search tool file_search = FileSearchTool() openai_file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") - + openai_vectorstore = ai_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {openai_vectorstore.id}") - + file_search.add_vector_store(openai_vectorstore.id) toolset = ToolSet() @@ -69,7 +56,10 @@ # Create agent with toolset and process assistant run agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", toolset=toolset + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + toolset=toolset, ) print(f"Created agent, agent ID: {agent.id}") @@ -78,7 +68,9 @@ print(f"Created thread, ID: {thread.id}") # Create message to thread - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?") + message = ai_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" + ) print(f"Created message, ID: {message.id}") # Create and process assistant run in thread with tools diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index 008b6af5ca9f..0f26d174c65a 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -32,8 +32,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) # Initialize function tool with user functions @@ -78,16 +77,20 @@ if isinstance(tool_call, RequiredFunctionToolCall): try: output = functions.execute(tool_call) - tool_outputs.append({ - "tool_call_id": tool_call.id, - "output": output, - }) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: - ai_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) + ai_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) print(f"Current run status: {run.status}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py index eeecf71292b5..0b883a7f39ac 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ -46,6 +46,7 @@ conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], ) + class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: diff --git 
a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 9bc86af6bae4..56733cfb80f9 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -38,22 +38,9 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging -) -""" - class MyEventHandler(AgentEventHandler): def __init__(self, functions: FunctionTool) -> None: @@ -82,20 +69,19 @@ def on_thread_run(self, run: "ThreadRun") -> None: if isinstance(tool_call, RequiredFunctionToolCall): try: output = functions.execute(tool_call) - tool_outputs.append({ - "tool_call_id": tool_call.id, - "output": output, - }) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: with ai_client.agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, - run_id=run.id, - tool_outputs=tool_outputs, - event_handler=self + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self ) as stream: stream.until_done() @@ -116,20 +102,25 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: functions = FunctionTool(user_functions) agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", tools=functions.definitions + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, ) print(f"Created agent, ID: {agent.id}") thread = ai_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.") + message = ai_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details.", + ) print(f"Created message, message ID {message.id}") with ai_client.agents.create_stream( - thread_id=thread.id, - assistant_id=agent.id, - event_handler=MyEventHandler(functions) + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler(functions) ) as stream: stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index d8f023c56004..fd64b52779d1 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -41,22 +41,9 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging -) -""" - # When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream # method and functions gets automatically called by default. class MyEventHandler(AgentEventHandler): @@ -102,13 +89,15 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = ai_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, send an email with the datetime and weather information in New York? Also let me know the details") + message = ai_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details", + ) print(f"Created message, message ID {message.id}") with ai_client.agents.create_stream( - thread_id=thread.id, - assistant_id=agent.id, - event_handler=MyEventHandler() + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: stream.until_done() diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index 0eeb6cbf401a..05c6e1878289 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -36,22 +36,9 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging -) -""" - # Function to handle tool stream iteration def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): try: @@ -110,10 +97,10 @@ def handle_message_delta(delta: MessageDeltaChunk) -> None: elif isinstance(event_data, ThreadRun): print(f"ThreadRun status: {event_data.status}") - + if event_data.status == "failed": print(f"Run failed. Error: {event_data.last_error}") - + elif isinstance(event_data, RunStep): print(f"RunStep type: {event_data.type}, Status: {event_data.status}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index aa536509e19b..bb79688cb110 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -34,55 +34,46 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] ) -# Or, you can create the Azure AI Client by giving all required parameters directly -""" -ai_client = AzureAIClient( - credential=DefaultAzureCredential(), - host_name=os.environ["AI_CLIENT_HOST_NAME"], - subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"], - resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"], - workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"], - logging_enable=True, # Optional. 
Remove this line if you don't want to show how to enable logging -) -""" - with ai_client: - + # upload a file and wait for it to be processed - file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) + file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") # create a vector store with the file and wait for it to be processed - # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active + # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") - + file_search_tool = FileSearchTool() file_search_tool.add_vector_store(vector_store.id) - + # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file # also, you do not need to provide tool_resources if you did not create a vector store above agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])) + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])), ) print(f"Created agent, agent ID: {agent.id}") thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") + print(f"Created thread, thread ID: {thread.id}") # create a message with the attachment attachment = MessageAttachment(file_id=file.id, tools=file_search_tool.definitions) - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]) + message = ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) print(f"Created message, message ID: {message.id}") run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") - + ai_client.agents.delete_file(file.id) print("Deleted file") @@ -91,6 +82,6 @@ ai_client.agents.delete_agent(agent.id) print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) + + messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py similarity index 50% rename from sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py rename to sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py index 66cc29821061..ba37c07034ac 100644 --- a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py +++ b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py @@ -4,14 +4,14 @@ # ------------------------------------ """ -FILE: sample_endpoints_async.py +FILE: sample_connections_async.py 
DESCRIPTION: - Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate endpoints - and get endpoint properties. + Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate connections + and get connections properties. USAGE: - python sample_endpoints_async.py + python sample_connections_async.py Before running the sample: @@ -24,67 +24,75 @@ import asyncio import os from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import EndpointType, AuthenticationType +from azure.ai.client.models import ConnectionType, AuthenticationType from azure.identity import DefaultAzureCredential -async def sample_endpoints_async(): +async def sample_connections_async(): # Create an Azure AI Client from a connection string, copied from your AI Studio project. # It should be in the format ";;;" - async with AzureAIClient.from_connection_string( + ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], - ) as ai_client: + ) - # List all endpoints of a particular "type", with or without their credentials: - print("====> Listing of all Azure Open AI endpoints:") - async for endpoint in ai_client.endpoints.list( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. + async with ai_client: + + # List all connections: + print("====> Listing of all connections:") + async for connection in ai_client.connections.list(): + print(connection) + + # List all connections of a particular "type", with or without their credentials: + print("====> Listing of all Azure Open AI connections:") + async for connection in ai_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. populate_secrets=True, # Optional. Defaults to "False" ): - print(endpoint) + print(connection) - # Get the default endpoint of a particular "type" (note that since at the moment the service - # does not have a notion of a default endpoint, this will return the first endpoint of that type): - endpoint = await ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, + # Get the default connection of a particular "type": + connection = await ai_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True, # Required. # Optional. Defaults to "False" ) - print("====> Get default Azure Open AI endpoint:") - print(endpoint) + print("====> Get default Azure Open AI connection:") + print(connection) - # Get an endpoint by its name: - endpoint = await ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required. + # Get a connection by its name: + connection = await ai_client.connections.get( + connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. 
) - print("====> Get endpoint by name:") - print(endpoint) + print("====> Get connection by name:") + print(connection) + # Examples of how you would create Inference client - if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: + if connection.connection_type == ConnectionType.AZURE_OPEN_AI: from openai import AsyncAzureOpenAI - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") client = AsyncAzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + api_key=connection.key, + azure_endpoint=connection.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) - elif endpoint.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.AAD: print("====> Creating AzureOpenAI client using Entra ID authentication") from azure.identity import get_bearer_token_provider + client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + connection.token_credential, "https://cognitiveservices.azure.com/.default" ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + azure_endpoint=connection.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) else: - raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") + raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = await client.chat.completions.create( model="gpt-4o", @@ -97,25 +105,24 @@ async def sample_endpoints_async(): ) print(response.choices[0].message.content) - elif endpoint.endpoint_type == EndpointType.SERVERLESS: + elif connection.connection_type == ConnectionType.SERVERLESS: from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import UserMessage - if endpoint.authentication_type == AuthenticationType.API_KEY: + if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key) - ) - elif endpoint.authentication_type == AuthenticationType.AAD: + + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + endpoint=connection.endpoint_url, credential=connection.properties.token_credential ) else: - 
raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") + raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) await client.close() @@ -123,9 +130,8 @@ async def sample_endpoints_async(): async def main(): - await sample_endpoints_async() + await sample_connections_async() if __name__ == "__main__": asyncio.run(main()) - diff --git a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py new file mode 100644 index 000000000000..ce34f8fd554d --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py @@ -0,0 +1,120 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_connections.py + +DESCRIPTION: + Given an AzureAIClient, this sample demonstrates how to enumerate connections + and get connection properties. + +USAGE: + python sample_connections.py + + Before running the sample: + + pip install azure.ai.client azure-identity + + Set the environment variables with your own values: + 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.client import AzureAIClient +from azure.ai.client.models import ConnectionType, AuthenticationType +from openai import AzureOpenAI +from azure.ai.inference import ChatCompletionsClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential, get_bearer_token_provider +from azure.core.credentials import AzureKeyCredential + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# It should be in the format ";;;" +ai_client = AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], +) + +with ai_client: + + # List all connections + connections = ai_client.connections.list() + print(f"====> Listing of all connections (found {len(connections)}):") + for connection in connections: + print(connection) + + # List all connections of a particular "type", with or without their credentials: + connections = ai_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True, # Optional. Defaults to "False" + ) + print("====> Listing of all Azure Open AI connections (found {len(connections)}):") + for connection in connections: + print(connection) + + # Get the default connection of a particular "type": + connection = ai_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" + ) + print("====> Get default Azure Open AI connection:") + print(connection) + + # Get a connection by name: + connection = ai_client.connections.get( + connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. 
+ ) + print("====> Get connection by name:") + print(connection) + +# Examples of how you would create Inference client +if connection.connection_type == ConnectionType.AZURE_OPEN_AI: + + if connection.authentication_type == AuthenticationType.API_KEY: + print("====> Creating AzureOpenAI client using API key authentication") + client = AzureOpenAI( + api_key=connection.key, + azure_endpoint=connection.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + ) + elif connection.authentication_type == AuthenticationType.AAD: + print("====> Creating AzureOpenAI client using Entra ID authentication") + client = AzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + connection.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=connection.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + ) + else: + raise ValueError(f"Authentication type {connection.authentication_type} not supported.") + + response = client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + client.close() + print(response.choices[0].message.content) + +elif connection.connection_type == ConnectionType.SERVERLESS: + + if connection.authentication_type == AuthenticationType.API_KEY: + print("====> Creating ChatCompletionsClient using API key authentication") + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + print("====> Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.properties.token_credential) + else: + raise ValueError(f"Authentication type {connection.authentication_type} not supported.") + + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + client.close() + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py b/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py deleted file mode 100644 index 51a49f309f42..000000000000 --- a/sdk/ai/azure-ai-client/samples/endpoints/sample_endpoints.py +++ /dev/null @@ -1,114 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_endpoints.py - -DESCRIPTION: - Given an AzureAIClient, this sample demonstrates how to enumerate endpoints - and get endpoint properties. - -USAGE: - python sample_endpoints.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set the environment variables with your own values: - 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
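The synchronous connections surface shown in sample_connections.py reduces to a few calls. A condensed sketch, assuming the same method names and the AI_CLIENT_CONNECTION_STRING environment variable used throughout these samples:

    import os
    from azure.ai.client import AzureAIClient
    from azure.ai.client.models import ConnectionType
    from azure.identity import DefaultAzureCredential

    ai_client = AzureAIClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"],
    )
    with ai_client:
        # List only Azure OpenAI connections, credentials included.
        for connection in ai_client.connections.list(
            connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True
        ):
            print(connection)

        # Fetch the default connection of that type.
        connection = ai_client.connections.get_default(
            connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True
        )
        print(connection)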
-""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import EndpointType, AuthenticationType -from openai import AzureOpenAI -from azure.ai.inference import ChatCompletionsClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from azure.core.credentials import AzureKeyCredential - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# It should be in the format ";;;" -with AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], -) as ai_client: - - # List all endpoints of a particular "type", with or without their credentials: - endpoints = ai_client.endpoints.list( - endpoint_type=EndpointType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True, # Optional. Defaults to "False" - ) - print("====> Listing of all Azure Open AI endpoints:") - for endpoint in endpoints: - print(endpoint) - - # Get the default endpoint of a particular "type" (note that since at the moment the service - # does not have a notion of a default endpoint, this will return the first endpoint of that type): - endpoint = ai_client.endpoints.get_default( - endpoint_type=EndpointType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" - ) - print("====> Get default Azure Open AI endpoint:") - print(endpoint) - - # Get an endpoint by its name: - endpoint = ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required. - ) - print("====> Get endpoint by name:") - print(endpoint) - - -# Examples of how you would create Inference client -if endpoint.endpoint_type == EndpointType.AZURE_OPEN_AI: - - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif endpoint.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01" # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") - - response = client.chat.completions.create( - model="gpt-4o", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - client.close() - print(response.choices[0].message.content) - -elif endpoint.endpoint_type == EndpointType.SERVERLESS: - - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet 
support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential) - else: - raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") - - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - client.close() - print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py index 288aeb4dfb3f..286f5ac53e9b 100644 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py @@ -1,12 +1,17 @@ from azure.ai.client import AzureAIClient from azure.identity import DefaultAzureCredential -from azure.ai.client.models import AppInsightsConfiguration, EvaluatorConfiguration, SamplingStrategy, EvaluationSchedule, CronTrigger +from azure.ai.client.models import ( + AppInsightsConfiguration, + EvaluatorConfiguration, + SamplingStrategy, + EvaluationSchedule, + CronTrigger, +) + def main(): app_insights_config = AppInsightsConfiguration( - resource_id="sample_id", - query="your_connection_string", - service_name="sample_service_name" + resource_id="sample_id", query="your_connection_string", service_name="sample_service_name" ) f1_evaluator_config = EvaluatorConfiguration( @@ -16,15 +21,12 @@ def main(): custom_relevance_evaluator_config = EvaluatorConfiguration( id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2", init_params={"param3": "value3", "param4": "value4"}, - data_mapping={"data3": "value3", "data4": "value4"} + data_mapping={"data3": "value3", "data4": "value4"}, ) cron_expression = "0 0 0 1/1 * ? 
*" cron_trigger = CronTrigger(expression=cron_expression) - evaluators = { - "f1_score": f1_evaluator_config, - "relevance": custom_relevance_evaluator_config - } + evaluators = {"f1_score": f1_evaluator_config, "relevance": custom_relevance_evaluator_config} sampling_strategy = SamplingStrategy(rate=0.2) display_name = "Sample Online Evaluation Schedule" @@ -40,7 +42,7 @@ def main(): display_name=display_name, description=description, tags=tags, - properties=properties + properties=properties, ) # Project Configuration @@ -51,15 +53,17 @@ def main(): client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", - logging_enable=True + logging_enable=True, ) client.evaluations - evaluation_schedule = client.evaluations.create_or_replace_schedule(id= "sample_schedule_id", resource=evaluation_schedule) + evaluation_schedule = client.evaluations.create_or_replace_schedule( + id="sample_schedule_id", resource=evaluation_schedule + ) client.evaluations.get_schedule(evaluation_schedule.id) client.evaluations.list_schedule() client.evaluations.list() client.evaluations.delete_schedule(evaluation_schedule.id) - + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py index cd6022f4c8a2..a9e38df25949 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py @@ -52,4 +52,3 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) - diff --git a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py index 361f7154d9bd..665e8c4a7cfd 100644 --- a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -20,13 +21,13 @@ # TODO clean this up / get rid of anything not in use -''' +""" issues I've noticed with the code: delete_thread(thread.id) fails cancel_thread(thread.id) expires/times out occasionally added time.sleep() to the beginning of my last few tests to avoid limits when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error -''' +""" # Set to True to enable SDK logging LOGGING_ENABLED = True @@ -44,7 +45,7 @@ agentClientPreparer = functools.partial( EnvironmentVariableLoader, - 'azure_ai_client', + "azure_ai_client", azure_ai_client_agents_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", ) """ @@ -58,28 +59,30 @@ ) """ + # create tool for agent use def fetch_current_datetime_live(): - """ - Get the current time as a JSON string. + """ + Get the current time as a JSON string. + + :return: Static time string so that test recordings work. + :rtype: str + """ + current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + time_json = json.dumps({"current_time": current_datetime}) + return time_json - :return: Static time string so that test recordings work. 
- :rtype: str - """ - current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - time_json = json.dumps({"current_time": current_datetime}) - return time_json # create tool for agent use def fetch_current_datetime_recordings(): - """ - Get the current time as a JSON string. + """ + Get the current time as a JSON string. - :return: Static time string so that test recordings work. - :rtype: str - """ - time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) - return time_json + :return: Static time string so that test recordings work. + :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json # Statically defined user functions for fast reference @@ -103,7 +106,7 @@ def create_client(self, **kwargs): ) return client - + # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ # NOTE: this test should not be run against a shared resource, as it will delete all agents @@ -124,21 +127,18 @@ def test_clear_client(self, **kwargs): client.close() """ - -# # ********************************************************************************** -# # -# # UNIT TESTS -# # -# # ********************************************************************************** - - + # # ********************************************************************************** + # # + # # UNIT TESTS + # # + # # ********************************************************************************** # # ********************************************************************************** # # # # HAPPY PATH SERVICE TESTS - agent APIs # # # # ********************************************************************************** - + # test client creation @agentClientPreparer() @recorded_by_proxy @@ -163,7 +163,7 @@ def test_create_delete_agent(self, **kwargs): agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) - + # delete agent and close client client.agents.delete_agent(agent.id) print("Deleted agent") @@ -181,12 +181,14 @@ def test_create_agent_with_tools(self, **kwargs): functions = FunctionTool(functions=user_functions_recording) # create agent with tools - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions) + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions + ) assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert agent.tools[0]['function']['name'] == functions.definitions[0]['function']['name'] - print("Tool successfully submitted:", functions.definitions[0]['function']['name']) + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete agent and close client client.agents.delete_agent(agent.id) @@ -252,7 +254,7 @@ def test_agent_list(self, **kwargs): # # ********************************************************************************** - # test creating thread + # test creating thread @agentClientPreparer() @recorded_by_proxy def test_create_thread(self, **kwargs): @@ -300,13 +302,12 @@ def test_get_thread(self, **kwargs): assert thread.id == thread2.id print("Got thread, thread ID", thread2.id) - # delete agent and close client 
client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' + """ TODO what can I update a thread with? # test updating thread @agentClientPreparer() @@ -334,9 +335,9 @@ def test_update_thread(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' + """ - ''' + """ # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working # status_code = 404, response = # error_map = {304: , 401: , 409: } @@ -368,15 +369,14 @@ def test_delete_thread(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' - + """ # # ********************************************************************************** # # # # HAPPY PATH SERVICE TESTS - Message APIs # # # # ********************************************************************************** - + # test creating message in a thread @agentClientPreparer() @recorded_by_proxy @@ -468,7 +468,7 @@ def test_list_messages(self, **kwargs): print("Created message, message ID", message1.id) messages1 = client.agents.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 - assert messages1.data[0].id == message1.id + assert messages1.data[0].id == message1.id message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") assert message2.id @@ -482,7 +482,11 @@ def test_list_messages(self, **kwargs): print("Created message, message ID", message3.id) messages3 = client.agents.list_messages(thread_id=thread.id) assert messages3.data.__len__() == 3 - assert messages3.data[0].id == message3.id or messages3.data[1].id == message2.id or messages3.data[2].id == message2.id + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) # delete agent and close client client.agents.delete_agent(agent.id) @@ -523,7 +527,7 @@ def test_get_message(self, **kwargs): print("Deleted agent") client.close() - ''' + """ TODO format the updated body # test updating message in a thread @agentClientPreparer() @@ -556,7 +560,7 @@ def test_update_message(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' + """ # # ********************************************************************************** # # @@ -626,7 +630,7 @@ def test_get_run(self, **kwargs): print("Deleted agent") client.close() - # TODO fix bc sometimes it works? and sometimes it doesnt? + # TODO fix bc sometimes it works? and sometimes it doesnt? # test sucessful run status TODO test for cancelled/unsucessful runs @agentClientPreparer() @recorded_by_proxy @@ -656,7 +660,16 @@ def test_run_status(self, **kwargs): print("Created run, run ID", run.id) # check status - assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) @@ -671,7 +684,7 @@ def test_run_status(self, **kwargs): print("Deleted agent") client.close() - ''' + """ # TODO another, but check that the number of runs decreases after cancelling runs # TODO can each thread only support one run? 
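The queued/in_progress polling loop recurs in almost every run test in this file. A small helper — hypothetical, not part of this patch — built on the same get_run signature the tests already use would keep the terminal-state check in one place (runs that reach requires_action still need their tool outputs handled separately):

    import time

    def wait_for_terminal_state(client, thread_id, run_id, interval=1):
        # Hypothetical helper mirroring the polling loops in this test file:
        # poll until the run leaves its transient states.
        run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
        while run.status in ["queued", "in_progress", "requires_action"]:
            time.sleep(interval)
            run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
        return run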
# test listing runs @@ -716,9 +729,9 @@ def test_list_runs(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' + """ - ''' + """ # TODO figure out what to update the run with # test updating run @agentClientPreparer() @@ -751,7 +764,7 @@ def test_update_run(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - ''' + """ # test submitting tool outputs to run @agentClientPreparer() @@ -770,7 +783,9 @@ def test_submit_tool_outputs_to_run(self, **kwargs): toolset.add(code_interpreter) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset) + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -791,11 +806,20 @@ def test_submit_tool_outputs_to_run(self, **kwargs): # check that tools are uploaded assert run.tools - assert run.tools[0]['function']['name'] == functions.definitions[0]['function']['name'] - print("Tool successfully submitted:", functions.definitions[0]['function']['name']) + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # check status - assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] while run.status in ["queued", "in_progress", "requires_action"]: time.sleep(1) run = client.agents.get_run(thread_id=thread.id, run_id=run.id) @@ -805,12 +829,14 @@ def test_submit_tool_outputs_to_run(self, **kwargs): print("Requires action: submit tool outputs") tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: - print("No tool calls provided - cancelling run") # TODO how can i make sure that it wants tools? should i have some kind of error message? + print( + "No tool calls provided - cancelling run" + ) # TODO how can i make sure that it wants tools? should i have some kind of error message? 
client.agents.cancel_run(thread_id=thread.id, run_id=run.id) break # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here + tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: client.agents.submit_tool_outputs_to_run( @@ -821,10 +847,9 @@ def test_submit_tool_outputs_to_run(self, **kwargs): print("Run completed with status:", run.status) - # check that messages used the tool messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id) - tool_message = messages['data'][0]['content'][0]['text']['value'] + tool_message = messages["data"][0]["content"][0]["text"]["value"] hour12 = time.strftime("%H") hour24 = time.strftime("%I") minute = time.strftime("%M") @@ -909,7 +934,16 @@ def test_create_thread_and_run(self, **kwargs): print("Created thread, thread ID", thread.id) # check status - assert run.status in ["queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] while run.status in ["queued", "in_progress", "requires_action"]: # wait for a second time.sleep(1) @@ -929,7 +963,7 @@ def test_create_thread_and_run(self, **kwargs): @agentClientPreparer() @recorded_by_proxy def test_list_run_step(self, **kwargs): - + time.sleep(50) # create client client = self.create_client(**kwargs) @@ -968,7 +1002,7 @@ def test_list_run_step(self, **kwargs): assert run.status in ["queued", "in_progress", "requires_action", "completed"] print("Run status:", run.status) steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - assert steps['data'].__len__() > 0 # TODO what else should we look at? + assert steps["data"].__len__() > 0 # TODO what else should we look at? assert run.status == "completed" print("Run completed") @@ -998,7 +1032,9 @@ def test_get_run_step(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, can you tell me a joke?") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ ) assert message.id print("Created message, message ID", message.id) @@ -1007,10 +1043,10 @@ def test_get_run_step(self, **kwargs): assert run.id print("Created run, run ID", run.id) - if (run.status == "failed"): - assert run.last_error - print(run.last_error) - print("FAILED HERE") + if run.status == "failed": + assert run.last_error + print(run.last_error) + print("FAILED HERE") # check status assert run.status in ["queued", "in_progress", "requires_action", "completed"] @@ -1018,7 +1054,7 @@ def test_get_run_step(self, **kwargs): # wait for a second time.sleep(1) run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - if (run.status == "failed"): + if run.status == "failed": assert run.last_error print(run.last_error) print("FAILED HERE") @@ -1027,8 +1063,8 @@ def test_get_run_step(self, **kwargs): # list steps, check that get_run_step works with first step_id steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - assert steps['data'].__len__() > 0 - step = steps['data'][0] + assert steps["data"].__len__() > 0 + step = steps["data"][0] get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step @@ -1036,14 +1072,13 @@ def test_get_run_step(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - + # # ********************************************************************************** # # # # HAPPY PATH SERVICE TESTS - Streaming APIs # # # # ********************************************************************************** - # # ********************************************************************************** # # # # NEGATIVE TESTS - TODO idk what goes here @@ -1082,4 +1117,3 @@ def test_negative_create_delete_agent(self, **kwargs): client.close() assert exception_caught """ - diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 4cf4cd6906cc..810ef45737fc 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 3ce2d0fb070fea5f0dd7ca7095eea280b1f81671 +commit: 2d73eae253e8f17286d5ca85eefb9330b6a93d3d repo: Azure/azure-rest-api-specs additionalDirectories: From 879ffe8dd81fa69bcea20842603189cdee22a6a8 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Fri, 18 Oct 2024 09:16:02 -0800 Subject: [PATCH 035/138] updates and minor fixes (#37984) --- .../azure/ai/client/models/_patch.py | 1 - .../azure/ai/client/operations/_patch.py | 2 +- ...gents_with_file_search_attachment_async.py | 25 +++++-------------- .../agents/sample_agents_file_search.py | 21 ++++++---------- ...mple_agents_with_file_search_attachment.py | 24 +++++------------- 5 files changed, 20 insertions(+), 53 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index b845f535cfec..e5022c3c3216 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -304,7 +304,6 @@ def add_vector_store(self, store_id: str): """ Add a vector store ID to the list of vector stores to search for files. 
""" - # TODO self.vector_store_ids.append(store_id) @property diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index b64000ce6e40..f3cba5090134 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -33,7 +33,7 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - import _types + from .. import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 25342eb89930..89c39c6b133b 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -24,7 +24,7 @@ from azure.ai.client.aio import AzureAIClient from azure.ai.client.models import FilePurpose -from azure.ai.client.models import FileSearchToolDefinition, FileSearchToolResource, MessageAttachment, ToolResources +from azure.ai.client.models import FileSearchTool, MessageAttachment, ToolResources from azure.identity import DefaultAzureCredential import os @@ -42,33 +42,23 @@ async def main(): # upload a file and wait for it to be processed async with ai_client: file = await ai_client.agents.upload_file_and_poll( - file_path="../product_info_1.md", purpose=FilePurpose.AGENTS, sleep_interval=4 + file_path="../product_info_1.md", purpose=FilePurpose.AGENTS ) - # create a vector store with the file and wait for it to be processed - # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active - vector_store = await ai_client.agents.create_vector_store_and_poll( - file_ids=[file.id], name="sample_vector_store", sleep_interval=4 - ) - - file_search_tool = FileSearchToolDefinition() - - # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file - # also, you do not need to provide tool_resources if you did not create a vector store above + # Create agent with file search tool agent = await ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tools=[file_search_tool], - tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])), ) print(f"Created agent, agent ID: {agent.id}") thread = await ai_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - # create a message with the attachment - attachment = MessageAttachment(file_id=file.id, tools=[file_search_tool]) + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) message = await ai_client.agents.create_message( thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] ) @@ -84,9 +74,6 @@ async def main(): await ai_client.agents.delete_file(file.id) print("Deleted file") - await ai_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") - await ai_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 93abf3542dfd..06b550bbd64f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -21,14 +21,9 @@ AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - import os from azure.ai.client import AzureAIClient -from azure.ai.client.models._patch import FileSearchTool, ToolSet +from azure.ai.client.models._patch import FileSearchTool from azure.identity import DefaultAzureCredential @@ -41,25 +36,23 @@ ) with ai_client: - # Create file search tool - file_search = FileSearchTool() + openai_file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") openai_vectorstore = ai_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {openai_vectorstore.id}") - file_search.add_vector_store(openai_vectorstore.id) - - toolset = ToolSet() - toolset.add(file_search) + # Create file search tool with resources + file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id]) - # Create agent with toolset and process assistant run + # Create agent with file search tool and process assistant run agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="Hello, you are helpful assistant and can search information from uploaded files", - toolset=toolset, + tools=file_search.definitions, + tool_resources=file_search.resources, ) print(f"Created agent, agent ID: {agent.id}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index bb79688cb110..83e2a198586c 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -24,8 +24,8 @@ import os from azure.ai.client import AzureAIClient from azure.ai.client.models import FilePurpose -from azure.ai.client.models import FileSearchToolResource, MessageAttachment, ToolResources -from azure.ai.client.models import FileSearchTool, ToolSet +from azure.ai.client.models import MessageAttachment +from azure.ai.client.models import FileSearchTool from azure.identity import DefaultAzureCredential @@ -43,29 +43,20 @@ file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") - # create a vector store with the file and wait for it to be processed - # if you do 
not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active
-    vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store")
-    print(f"Created vector store, vector store ID: {vector_store.id}")
-
-    file_search_tool = FileSearchTool()
-    file_search_tool.add_vector_store(vector_store.id)
-
-    # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file
-    # also, you do not need to provide tool_resources if you did not create a vector store above
+    # Create agent with file search tool
     agent = ai_client.agents.create_agent(
         model="gpt-4-1106-preview",
         name="my-assistant",
         instructions="You are helpful assistant",
-        tool_resources=ToolResources(file_search=FileSearchToolResource(vector_store_ids=[vector_store.id])),
     )
     print(f"Created agent, agent ID: {agent.id}")

     thread = ai_client.agents.create_thread()
     print(f"Created thread, thread ID: {thread.id}")

-    # create a message with the attachment
-    attachment = MessageAttachment(file_id=file.id, tools=file_search_tool.definitions)
+    # Create a message with the file search attachment
+    # Notice that a temporary vector store is created when attachments are used, with a default expiration policy of seven days.
+    attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
     message = ai_client.agents.create_message(
         thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
     )
@@ -77,9 +68,6 @@

     ai_client.agents.delete_file(file.id)
     print("Deleted file")

-    ai_client.agents.delete_vector_store(vector_store.id)
-    print("Deleted vectore store")
-
     ai_client.agents.delete_agent(agent.id)
     print("Deleted agent")

From f3aa09809b9152eb033d36eda4d41b018a820ca4 Mon Sep 17 00:00:00 2001
From: Howie Leung
Date: Fri, 18 Oct 2024 10:23:03 -0700
Subject: [PATCH 036/138] Clean up (#37985)

---
 .../azure/ai/client/aio/operations/_patch.py   |  8 ++++----
 .../azure/ai/client/operations/_patch.py       |  8 ++++----
 ...nts_vector_store_batch_file_search_async.py | 18 ++----------------
 ...le_agents_vector_store_batch_file_search.py | 18 ++----------------
 4 files changed, 12 insertions(+), 40 deletions(-)

diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
index ab68d88d4309..0d7d924a2ac2 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py
@@ -1440,14 +1440,14 @@ async def upload_file(
         """

     @overload
-    async def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _models.OpenAIFile:
+    async def upload_file(self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any) -> _models.OpenAIFile:
         """Uploads a file for use by other operations.

         :param file_path: Required.
         :type file_path: str
         :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
         "assistants_output", "batch", "batch_output", and "vision". Required.
-        :paramtype purpose: str
+        :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
         :return: OpenAIFile.
The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: @@ -1549,7 +1549,7 @@ async def upload_file_and_poll( @overload async def upload_file_and_poll( - self, file_path: str, *, purpose: str, sleep_interval: float = 1, **kwargs: Any + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1557,7 +1557,7 @@ async def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index f3cba5090134..41cb15216239 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1453,14 +1453,14 @@ def upload_file( """ @overload - def upload_file(self, file_path: str, *, purpose: str, **kwargs: Any) -> _models.OpenAIFile: + def upload_file(self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param file_path: Required. :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping :rtype: ~azure.ai.client.models.OpenAIFile :raises ~azure.core.exceptions.HttpResponseError: @@ -1562,7 +1562,7 @@ def upload_file_and_poll( @overload def upload_file_and_poll( - self, file_path: str, *, purpose: str, sleep_interval: float = 1, **kwargs: Any + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1570,7 +1570,7 @@ def upload_file_and_poll( :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str + :paramtype purpose: str or ~azure.ai.client.models.FilePurpose :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. 
:paramtype sleep_interval: float
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
index c46370febe05..e1e70ef9a94d 100644
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
+++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
@@ -38,18 +38,6 @@ async def main():
         conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
     )

-    # Or, you can create the Azure AI Client by giving all required parameters directly
-    """
-    ai_client = AzureAIClient(
-        credential=DefaultAzureCredential(),
-        host_name=os.environ["AI_CLIENT_HOST_NAME"],
-        subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
-        resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
-        workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
-        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
-    )
-    """
-
     async with ai_client:

         # upload a file and wait for it to be processed
@@ -57,19 +45,17 @@ async def main():
         print(f"Uploaded file, file ID: {file.id}")

         # create a vector store with no file and wait for it to be processed
-        # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active
         vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
         print(f"Created vector store, vector store ID: {vector_store.id}")

-        # add the file to the vector store
+        # add the file to the vector store, or alternatively supply the file IDs when creating the vector store
        vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id])
         print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")

         # create a file search tool
         file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])

-        # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file
-        # also, you do not need to provide tool_resources if you did not create a vector store above
+        # notice that the FileSearchTool must be added as a tool, together with its tool_resources, or the assistant will be unable to search the file
         agent = await ai_client.agents.create_agent(
             model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant",
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
index a17607733153..562e07a6a886 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
@@ -36,18 +36,6 @@
     conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"]
 )

-# Or, you can create the Azure AI Client by giving all required parameters directly
-"""
-ai_client = AzureAIClient(
-    credential=DefaultAzureCredential(),
-    host_name=os.environ["AI_CLIENT_HOST_NAME"],
-    subscription_id=os.environ["AI_CLIENT_SUBSCRIPTION_ID"],
-    resource_group_name=os.environ["AI_CLIENT_RESOURCE_GROUP_NAME"],
-    workspace_name=os.environ["AI_CLIENT_WORKSPACE_NAME"],
-    logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
-)
-"""
-
 with ai_client:

     # upload a file and wait for it to be processed
@@ -55,19 +43,17 @@
     print(f"Uploaded file, file ID: {file.id}")

     # create a vector store with no file and wait for it to be processed
-    # if you do not specify a vector store, create_message will create a vector store with a default expiration policy of seven days after they were last active
     vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
     print(f"Created vector store, vector store ID: {vector_store.id}")

-    # add the file to the vector store
+    # add the file to the vector store, or alternatively supply the file IDs when creating the vector store
     vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id])
     print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")

     # create a file search tool
     file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])

-    # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to search the file
-    # also, you do not need to provide tool_resources if you did not create a vector store above
+    # notice that the FileSearchTool must be added as a tool, together with its tool_resources, or the assistant will be unable to search the file
     agent = ai_client.agents.create_agent(
         model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant",

From e69d662c0ef47a1a65b30de04c2acf9d75749861 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Fri, 18 Oct 2024 10:46:48 -0700
Subject: [PATCH 037/138] Rename connection string environment variables

Rename AI_CLIENT_CONNECTION_STRING to PROJECT_CONNECTION_STRING, to match how it's shown in Azure AI Studio.
Also rename AZURE_AI_CLIENT_AGENTS_CONNECTION_STRING (used in Agents tests) to PROJECT_CONNECTION_STRING_AGENTS_TESTS --- .../agents/async_samples/sample_agents_basics_async.py | 4 ++-- .../agents/async_samples/sample_agents_functions_async.py | 4 ++-- .../async_samples/sample_agents_stream_eventhandler_async.py | 4 ++-- .../sample_agents_stream_eventhandler_with_toolset_async.py | 4 ++-- .../async_samples/sample_agents_stream_iteration_async.py | 4 ++-- .../sample_agents_vector_store_batch_file_search_async.py | 4 ++-- .../sample_agents_with_file_search_attachment_async.py | 4 ++-- sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py | 4 ++-- .../agents/sample_agents_code_interpreter_attachment.py | 4 ++-- .../samples/agents/sample_agents_file_search.py | 4 ++-- .../azure-ai-client/samples/agents/sample_agents_functions.py | 4 ++-- .../samples/agents/sample_agents_run_with_toolset.py | 4 ++-- .../samples/agents/sample_agents_stream_eventhandler.py | 4 ++-- .../sample_agents_stream_eventhandler_with_functions.py | 4 ++-- .../agents/sample_agents_stream_eventhandler_with_toolset.py | 4 ++-- .../samples/agents/sample_agents_stream_iteration.py | 4 ++-- .../agents/sample_agents_stream_iteration_with_toolset.py | 4 ++-- .../agents/sample_agents_vector_store_batch_file_search.py | 4 ++-- .../agents/sample_agents_with_file_search_attachment.py | 4 ++-- .../connections/async_samples/sample_connections_async.py | 4 ++-- .../azure-ai-client/samples/connections/sample_connections.py | 4 ++-- .../async_samples/sample_get_azure_openai_client_async.py | 4 ++-- .../async_samples/sample_get_chat_completions_client_async.py | 4 ++-- .../async_samples/sample_get_embeddings_client_async.py | 4 ++-- .../samples/inference/sample_get_azure_openai_client.py | 4 ++-- .../samples/inference/sample_get_chat_completions_client.py | 4 ++-- .../samples/inference/sample_get_embeddings_client.py | 4 ++-- sdk/ai/azure-ai-client/tests/agents/test_agents_client.py | 4 ++-- 28 files changed, 56 insertions(+), 56 deletions(-) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py index 94eae399cf63..5824588df32e 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import asyncio import time @@ -36,7 +36,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py index 58d7872e1e16..da0cd590bc2b 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio import time @@ -38,7 +38,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index fbd5f1cb4977..6d7a61fbf551 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio from typing import Any @@ -63,7 +63,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 1f6be096c705..df6457829769 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio from typing import Any @@ -70,7 +70,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # Initialize toolset with user functions diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py index 989c565c43f6..378ab820cab4 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio @@ -36,7 +36,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index e1e70ef9a94d..79981d834f90 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio @@ -35,7 +35,7 @@ async def main(): ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 89c39c6b133b..0a18f046b551 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio @@ -36,7 +36,7 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # upload a file and wait for it to be processed diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py index f0d4e68d6219..52a91f9ec043 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os, time @@ -31,7 +31,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index bd4acf9b7fc4..f1759f9bd3ab 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -34,7 +34,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py index 06b550bbd64f..df47c9413b7f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os @@ -32,7 +32,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py index 0f26d174c65a..c403732b7d8f 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os, time from azure.ai.client import AzureAIClient @@ -32,7 +32,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # Initialize function tool with user functions diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py index 75df4ab5f3cb..11897b9f1f85 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -34,7 +34,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) # Initialize agent toolset with user functions and code interpreter diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py index 0b883a7f39ac..a20b6176a616 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os @@ -43,7 +43,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 56733cfb80f9..77b3796044f3 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -38,7 +38,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) class MyEventHandler(AgentEventHandler): diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index fd64b52779d1..3765362d4c78 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -41,7 +41,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py index 3d860038710a..3c223ac162b9 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os @@ -40,7 +40,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py index 05c6e1878289..cfe2167697ae 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -36,7 +36,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # Function to handle tool stream iteration diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py index 562e07a6a886..c5f0ac29ae4e 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -33,7 +33,7 @@ ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py index 83e2a198586c..5e3a6f2f4d39 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os @@ -34,7 +34,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py index ba37c07034ac..f1950b6ed07e 100644 --- a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client aiohttp azure-identity Set the environment variables with your own values: - 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio @@ -33,7 +33,7 @@ async def sample_connections_async(): # It should be in the format ";;;" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) async with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py index ce34f8fd554d..0ed10ac77017 100644 --- a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set the environment variables with your own values: - 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os @@ -34,7 +34,7 @@ # It should be in the format ";;;" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with ai_client: diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py index 3376675d4b5a..08f57cd10c79 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client aiohttp openai_async Set this environment variable with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os import asyncio @@ -30,7 +30,7 @@ async def sample_get_azure_openai_client_async(): async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection: diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py index 625b4b3fc7b1..efee31557786 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client aiohttp azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os import asyncio @@ -31,7 +31,7 @@ async def sample_get_chat_completions_client_async(): async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py index a9e38df25949..2de28d1b512d 100644 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py @@ -18,7 +18,7 @@ pip install azure.ai.client aiohttp azure-identity Set this environment variable with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import asyncio import os @@ -30,7 +30,7 @@ async def sample_get_embeddings_client_async(): async with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py index 4d2c0dab80fd..01f193507201 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py @@ -18,7 +18,7 @@ pip install azure.ai.client openai Set this environment variable with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os from azure.ai.client import AzureAIClient @@ -26,7 +26,7 @@ with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated OpenAI client for your default Azure OpenAI connection: diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py index af508bd87571..3835e1b6dc88 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variables with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os from azure.ai.client import AzureAIClient @@ -27,7 +27,7 @@ with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py index e1a1a22dae82..57828b7eaffd 100644 --- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py +++ b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py @@ -18,7 +18,7 @@ pip install azure.ai.client azure-identity Set this environment variable with your own values: - AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import os from azure.ai.client import AzureAIClient @@ -26,7 +26,7 @@ with AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as ai_client: # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: diff --git a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py index 665e8c4a7cfd..38ff00619039 100644 --- a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py @@ -46,7 +46,7 @@ agentClientPreparer = functools.partial( EnvironmentVariableLoader, "azure_ai_client", - azure_ai_client_agents_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", + project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", ) """ agentClientPreparer = functools.partial( @@ -96,7 +96,7 @@ class TestagentClient(AzureRecordedTestCase): # helper function: create client and using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("azure_ai_client_agents_connection_string") + connection_string = kwargs.pop("project_connection_string_agents_tests") credential = self.get_credential(AzureAIClient, is_async=False) # create and return client From 715c96e957ea569c6bf84d4d3c7fbf35f012a13f Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Fri, 18 Oct 2024 12:11:10 -0800 Subject: [PATCH 038/138] updated codeinterpreter attachment sample (#37990) --- .../sample_agents_code_interpreter_attachment.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py index f1759f9bd3ab..af690c4ce369 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py @@ -25,7 +25,7 @@ from azure.ai.client import AzureAIClient from azure.ai.client.models import CodeInterpreterTool from azure.ai.client.models import FilePurpose -from azure.ai.client.models import CodeInterpreterToolDefinition, MessageAttachment +from azure.ai.client.models import MessageAttachment from azure.identity import DefaultAzureCredential @@ -43,14 +43,13 @@ print(f"Uploaded file, file ID: {file.id}") code_interpreter = CodeInterpreterTool() - code_interpreter.add_file(file.id) - # notices that CodeInterpreterToolDefinition as tool must be added or the assistant unable to view the file + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment agent = ai_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", - tools=[CodeInterpreterToolDefinition()], + tools=code_interpreter.definitions, ) print(f"Created agent, agent ID: {agent.id}") @@ -65,7 +64,11 @@ print(f"Created message, message ID: {message.id}") run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") + print(f"Run finished 
with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") ai_client.agents.delete_file(file.id) print("Deleted file") From 7fa5125b0dc4b80b7ec22e4922e9b510663feac4 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 18 Oct 2024 16:41:08 -0700 Subject: [PATCH 039/138] Filter parameters, returned by SSE event (#37991) * nirovins/filter_parameters * Fixes * Move utility functions out of class --- .../azure/ai/client/models/_patch.py | 53 +++++++++--- .../tests/endpoints/unit_tests.py | 80 +++++++++++++++++++ 2 files changed, 123 insertions(+), 10 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index e5022c3c3216..3a5c1e56681c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -41,6 +41,39 @@ logger = logging.getLogger(__name__) +def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: + """ + Remove the parameters, non present in class public fields; return shallow copy of a dictionary. + + **Note:** Classes inherited from the model check that the parameters are present + in the list of attributes and if they are not, the error is being raised. This check may not + be relevant for classes, not inherited from azure.ai.client._model_base.Model. + :param model_class: The class of model to be used. + :param parameters: The parsed dictionary with parameters. + :return: The dictionary with all invalid parameters removed. + """ + new_params = {} + valid_parameters = set( + filter(lambda x: not x.startswith('_') and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys()) + ) + for k in filter(lambda x: x in valid_parameters, parameters.keys()): + new_params[k] = parameters[k] + return new_params + + +def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: + """ + Instantiate class with the set of parameters from the server. + + :param model_class: The class of model to be used. + :param parameters: The parsed dictionary with parameters. + :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. 
+ """ + if not isinstance(parameters, dict): + return parameters + return model_class(**_filter_parameters(model_class, parameters)) + + class ConnectionProperties: def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: @@ -676,7 +709,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: AgentStreamEvent.THREAD_RUN_CANCELLED, AgentStreamEvent.THREAD_RUN_EXPIRED, }: - event_data_obj = ThreadRun(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) elif event_type in { AgentStreamEvent.THREAD_RUN_STEP_CREATED, AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, @@ -685,18 +718,18 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, }: - event_data_obj = RunStep(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(RunStep, parsed_data) elif event_type in { AgentStreamEvent.THREAD_MESSAGE_CREATED, AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, AgentStreamEvent.THREAD_MESSAGE_COMPLETED, AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, }: - event_data_obj = ThreadMessage(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = MessageDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = RunStepDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj =_safe_instantiate(RunStepDeltaChunk, parsed_data) else: event_data_obj = parsed_data @@ -823,7 +856,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: AgentStreamEvent.THREAD_RUN_CANCELLED, AgentStreamEvent.THREAD_RUN_EXPIRED, }: - event_data_obj = ThreadRun(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) elif event_type in { AgentStreamEvent.THREAD_RUN_STEP_CREATED, AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, @@ -832,18 +865,18 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, }: - event_data_obj = RunStep(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(RunStep, parsed_data) elif event_type in { AgentStreamEvent.THREAD_MESSAGE_CREATED, AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, AgentStreamEvent.THREAD_MESSAGE_COMPLETED, AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, }: - event_data_obj = ThreadMessage(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = MessageDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = RunStepDeltaChunk(**parsed_data) if isinstance(parsed_data, dict) else parsed_data + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: event_data_obj = parsed_data diff --git 
a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py index 341224aae4e5..7361c8d0f4f0 100644 --- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py +++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py @@ -2,12 +2,17 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ +import copy import sys import logging import datetime +import pytest from azure.ai.client.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError +from azure.ai.client.models._models import ThreadRun, RunStep, ThreadMessage +from azure.ai.client.models._patch import _safe_instantiate, _filter_parameters + # import azure.ai.client as sdk @@ -112,3 +117,78 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc + + + @pytest.mark.parametrize( + 'valid_params,model_cls', + [ + ({ + 'id': '12345', + 'object': "thread.run", + 'thread_id': "6789", + 'assistant_id': "101112", + 'status': 'in_progress', + 'required_action': 'test', + 'last_error': 'none', + 'model': 'gpt-4', + 'instructions': "Test instruction", + 'tools': 'Test function', + 'created_at': datetime.datetime(2024, 11, 14), + 'expires_at': datetime.datetime(2024, 11, 17), + 'started_at': datetime.datetime(2024, 11, 15), + 'completed_at': datetime.datetime(2024, 11, 16), + 'cancelled_at': datetime.datetime(2024, 11, 16), + 'failed_at': datetime.datetime(2024, 11, 16), + 'incomplete_details': 'max_completion_tokens', + 'usage': 'in_progress', + 'temperature': 1.0, + 'top_p': 1.0, + 'max_completion_tokens': 1000, + 'truncation_strategy': 'test', + 'tool_choice': "tool name", + 'response_format': "json", + 'metadata': {'foo': 'bar'}, + 'tool_resources': "test", + 'parallel_tool_calls': True + }, ThreadRun), + ({ + 'id': '1233', + 'object': 'thread.message', + 'created_at': datetime.datetime(2024, 11, 14), + 'thread_id': '5678', + 'status': 'incomplete', + 'incomplete_details': "test", + 'completed_at': datetime.datetime(2024, 11, 16), + 'incomplete_at': datetime.datetime(2024, 11, 16), + 'role': 'assistant', + 'content': 'Test', + 'assistant_id': '9911', + 'run_id': '11', + 'attachments': ['4', '8', '15', '16', '23', '42'], + 'metadata': {'foo': 'bar'} + }, ThreadMessage) + ] + ) + def test_correct_thread_params(self, valid_params, model_cls): + """Test that extra parameters returned by the service in an SSE response do not create issues.""" + + bad_params = {'foo': 'bar'} + params = copy.deepcopy(valid_params) + params.update(bad_params) + # We should not be able to create a ThreadRun with bad parameters. + with pytest.raises(TypeError): + model_cls(**params) + filtered_params = _filter_parameters(model_cls, params) + for k in valid_params: + assert k in filtered_params + for k in bad_params: + assert k not in filtered_params + # Implicitly check that we can create the object with the filtered parameters. + model_cls(**filtered_params) + # Check safe initialization.
+ assert isinstance(_safe_instantiate(model_cls, params), model_cls) + + + def test_safe_instantiate_non_dict(self): + """Test the _safe_instantiate method when the input is not a dictionary.""" + assert _safe_instantiate(RunStep, 42) == 42 From ecd9d804690e3761de8e224c2b40d0c18d210726 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Mon, 21 Oct 2024 11:54:44 -0700 Subject: [PATCH 040/138] Evaluation data set id change revert (#38007) --- .../azure/ai/client/models/__init__.py | 8 +- .../azure/ai/client/models/_models.py | 75 ++++++++++--------- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 3 files changed, 44 insertions(+), 41 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py index 9678b219f046..9d07edc2486e 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py @@ -15,7 +15,7 @@ from ._models import AppInsightsConfiguration from ._models import AzureAISearchResource from ._models import AzureAISearchToolDefinition -from ._models import BingSearchToolDefinition +from ._models import BingGroundingToolDefinition from ._models import CodeInterpreterToolDefinition from ._models import CodeInterpreterToolResource from ._models import ConnectionListResource @@ -78,7 +78,7 @@ from ._models import RunError from ._models import RunStep from ._models import RunStepAzureAISearchToolCall -from ._models import RunStepBingSearchToolCall +from ._models import RunStepBingGroundingToolCall from ._models import RunStepCodeInterpreterImageOutput from ._models import RunStepCodeInterpreterImageReference from ._models import RunStepCodeInterpreterLogOutput @@ -195,7 +195,7 @@ "AppInsightsConfiguration", "AzureAISearchResource", "AzureAISearchToolDefinition", - "BingSearchToolDefinition", + "BingGroundingToolDefinition", "CodeInterpreterToolDefinition", "CodeInterpreterToolResource", "ConnectionListResource", @@ -258,7 +258,7 @@ "RunError", "RunStep", "RunStepAzureAISearchToolCall", - "RunStepBingSearchToolCall", + "RunStepBingGroundingToolCall", "RunStepCodeInterpreterImageOutput", "RunStepCodeInterpreterImageReference", "RunStepCodeInterpreterLogOutput", diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 248f6023dc00..1228a60103d1 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -480,7 +480,7 @@ class ToolDefinition(_model_base.Model): """An abstract representation of an input tool definition that an agent can use. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AzureAISearchToolDefinition, BingSearchToolDefinition, CodeInterpreterToolDefinition, + AzureAISearchToolDefinition, BingGroundingToolDefinition, CodeInterpreterToolDefinition, FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition, SharepointToolDefinition @@ -540,17 +540,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="azure_ai_search", **kwargs) -class BingSearchToolDefinition(ToolDefinition, discriminator="bing_search"): - """The input definition information for a bing search tool as used to configure an agent.
+class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): + """The input definition information for a bing grounding search tool as used to configure an + agent. - :ivar type: The object type, which is always 'bing_search'. Required. Default value is - "bing_search". + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". :vartype type: str """ - type: Literal["bing_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_search'. Required. Default value is \"bing_search\".""" + type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" @overload def __init__( @@ -565,7 +567,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="bing_search", **kwargs) + super().__init__(*args, type="bing_grounding", **kwargs) class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): @@ -630,7 +632,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles class ConnectionListResource(_model_base.Model): - """A set of connection resources currently used by either the ``bing_search``\\ , + """A set of connection resources currently used by either the ``bing_grounding``\\ , ``microsoft_fabric``\\ , or ``sharepoint`` tools. :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 @@ -917,7 +919,7 @@ class Dataset(InputData, discriminator="dataset"): type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore """Required. Default value is \"dataset\".""" - id: str = rest_field(name="Uri") + id: str = rest_field() """Evaluation input data. Required.""" @overload @@ -3242,7 +3244,7 @@ class RunStepToolCall(_model_base.Model): existing run. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepAzureAISearchToolCall, RunStepBingSearchToolCall, RunStepCodeInterpreterToolCall, + RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, RunStepSharepointToolCall @@ -3320,25 +3322,26 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useles super().__init__(*args, type="azure_ai_search", **kwargs) -class RunStepBingSearchToolCall(RunStepToolCall, discriminator="bing_search"): - """A record of a call to a bing search tool, issued by the model in evaluation of a defined tool, - that represents - executed bing search. +class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): + """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined + tool, that represents + executed search with bing grounding. :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. Required. :vartype id: str - :ivar type: The object type, which is always 'bing_search'. Required. Default value is - "bing_search". + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". :vartype type: str - :ivar bing_search: Reserved for future use. Required. 
- :vartype bing_search: dict[str, str] + :ivar bing_grounding: Reserved for future use. Required. + :vartype bing_grounding: dict[str, str] """ - type: Literal["bing_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_search'. Required. Default value is \"bing_search\".""" - bing_search: Dict[str, str] = rest_field() + type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: Dict[str, str] = rest_field() """Reserved for future use. Required.""" @overload @@ -3346,7 +3349,7 @@ def __init__( self, *, id: str, # pylint: disable=redefined-builtin - bing_search: Dict[str, str], + bing_grounding: Dict[str, str], ) -> None: ... @overload @@ -3357,7 +3360,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="bing_search", **kwargs) + super().__init__(*args, type="bing_grounding", **kwargs) class RunStepCodeInterpreterToolCallOutput(_model_base.Model): @@ -5195,9 +5198,9 @@ class ToolResources(_model_base.Model): :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store IDs. :vartype file_search: ~azure.ai.client.models.FileSearchToolResource - :ivar bing_search: Resources to be used by the ``bing_search`` tool consisting of connection - IDs. - :vartype bing_search: ~azure.ai.client.models.ConnectionListResource + :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of + connection IDs. + :vartype bing_grounding: ~azure.ai.client.models.ConnectionListResource :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs. :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource @@ -5213,8 +5216,8 @@ class ToolResources(_model_base.Model): """Resources to be used by the ``code_interpreter tool`` consisting of file IDs.""" file_search: Optional["_models.FileSearchToolResource"] = rest_field() """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" - bing_search: Optional["_models.ConnectionListResource"] = rest_field() - """Resources to be used by the ``bing_search`` tool consisting of connection IDs.""" + bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() + """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs.""" microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs.""" share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") @@ -5228,7 +5231,7 @@ def __init__( *, code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, file_search: Optional["_models.FileSearchToolResource"] = None, - bing_search: Optional["_models.ConnectionListResource"] = None, + bing_grounding: Optional["_models.ConnectionListResource"] = None, microsoft_fabric: Optional["_models.ConnectionListResource"] = None, share_point: Optional["_models.ConnectionListResource"] = None, azure_ai_search: Optional["_models.AzureAISearchResource"] = None, @@ -5361,9 +5364,9 @@ class UpdateToolResourcesOptions(_model_base.Model): :ivar file_search: Overrides the vector store attached to this agent. 
There can be a maximum of 1 vector store attached to the agent. :vartype file_search: ~azure.ai.client.models.UpdateFileSearchToolResourceOptions - :ivar bing_search: Overrides the list of connections to be used by the ``bing_search`` tool - consisting of connection IDs. - :vartype bing_search: ~azure.ai.client.models.ConnectionListResource + :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding`` + tool consisting of connection IDs. + :vartype bing_grounding: ~azure.ai.client.models.ConnectionListResource :ivar microsoft_fabric: Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of connection IDs. :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource @@ -5382,8 +5385,8 @@ class UpdateToolResourcesOptions(_model_base.Model): file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" - bing_search: Optional["_models.ConnectionListResource"] = rest_field() - """Overrides the list of connections to be used by the ``bing_search`` tool consisting of + bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() + """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of connection IDs.""" microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of @@ -5401,7 +5404,7 @@ def __init__( *, code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, - bing_search: Optional["_models.ConnectionListResource"] = None, + bing_grounding: Optional["_models.ConnectionListResource"] = None, microsoft_fabric: Optional["_models.ConnectionListResource"] = None, share_point: Optional["_models.ConnectionListResource"] = None, azure_ai_search: Optional["_models.AzureAISearchResource"] = None, diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index 810ef45737fc..df07cabfc8cf 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 2d73eae253e8f17286d5ca85eefb9330b6a93d3d +commit: 5e47aaf15376915f221a0527f344733ce7f924cb repo: Azure/azure-rest-api-specs additionalDirectories: From 7cc54a7986a5a30ba51339335ec1ad51e3f71a41 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 21 Oct 2024 17:04:24 -0700 Subject: [PATCH 041/138] Updates to .connection operators (#38024) - Re-emit from latest TypeSpec - Updates to .connections operators to support the new preview version of REST API - Add "id" on ConnectionProperties --- .../ai/client/aio/operations/_operations.py | 158 +++++++++----- .../azure/ai/client/aio/operations/_patch.py | 196 ++++++++++++----- .../azure/ai/client/models/_enums.py | 4 + .../azure/ai/client/models/_models.py | 25 ++- .../azure/ai/client/models/_patch.py | 32 ++- .../azure/ai/client/operations/_operations.py | 201 +++++++++++++----- .../azure/ai/client/operations/_patch.py | 186 +++++++++++----- ...ts_vector_store_batch_file_search_async.py | 37 ++-- ...gents_with_file_search_attachment_async.py | 4 +- ...ents_stream_eventhandler_with_functions.py | 1 + ...agents_stream_eventhandler_with_toolset.py | 1 
+ ...le_agents_stream_iteration_with_toolset.py | 1 + ...e_agents_vector_store_batch_file_search.py | 34 +-- .../async_samples/sample_connections_async.py | 38 ++-- .../samples/connections/sample_connections.py | 28 +-- .../tests/endpoints/unit_tests.py | 106 ++++----- sdk/ai/azure-ai-client/tsp-location.yaml | 2 +- 17 files changed, 704 insertions(+), 350 deletions(-) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py index 989d32dfa108..201a6016dc89 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py @@ -73,6 +73,7 @@ build_agents_update_run_request, build_agents_update_thread_request, build_agents_upload_file_request, + build_connections_get_request, build_connections_list_request, build_connections_list_secrets_request, build_evaluations_create_or_replace_schedule_request, @@ -4964,9 +4965,24 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # pylint: disable=protected-access + async def _list( # pylint: disable=protected-access + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: """List the details of all the connections (not including their credentials). + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.client.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: @@ -4985,6 +5001,9 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5027,58 +5046,101 @@ async def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: return deserialized # type: ignore + @distributed_trace_async + async def _get( # pylint: disable=protected-access + self, connection_name: str, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + @overload async def _list_secrets( # pylint: disable=protected-access - self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... @overload async def _list_secrets( # pylint: disable=protected-access - self, - connection_name_in_url: str, - *, - connection_name: str, - subscription_id: str, - resource_group_name: str, - workspace_name: str, - api_version_in_body: str, - content_type: str = "application/json", - **kwargs: Any + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... @overload async def _list_secrets( # pylint: disable=protected-access - self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... 
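For orientation, here is a minimal async usage sketch of these reworked connection operations; the public get_default/get wrappers defined later in this patch route through the _get and _list_secrets operations above. This is a hedged illustration, not part of the patch: it assumes the async client is exported as azure.ai.client.aio.AzureAIClient with the same from_connection_string factory as the sync client shown earlier in this series, and that PROJECT_CONNECTION_STRING is set as in the samples.

import asyncio
import os

from azure.ai.client.aio import AzureAIClient  # assumed async export, mirroring the sync client
from azure.ai.client.models import ConnectionType
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    async with AzureAIClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
    ) as ai_client:
        # Fetch the default Azure OpenAI connection, including its credentials;
        # with_credentials=True makes the wrapper call the listsecrets route above.
        connection = await ai_client.connections.get_default(
            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True
        )
        print(connection)  # ConnectionProperties.__str__ prints the key/token unmasked, so handle with care


asyncio.run(main())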
@distributed_trace_async async def _list_secrets( # pylint: disable=protected-access - self, - connection_name_in_url: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - connection_name: str = _Unset, - subscription_id: str = _Unset, - resource_group_name: str = _Unset, - workspace_name: str = _Unset, - api_version_in_body: str = _Unset, - **kwargs: Any + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). + """Get the details of a single connection, including credentials (if available). - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str + :param connection_name: Connection Name. Required. + :type connection_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword connection_name: Connection Name (should be the same as the connection name in the URL - path). Required. - :paramtype connection_name: str - :keyword subscription_id: The ID of the target subscription. Required. - :paramtype subscription_id: str - :keyword resource_group_name: The name of the Resource Group. Required. - :paramtype resource_group_name: str - :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. - :paramtype workspace_name: str - :keyword api_version_in_body: The api version. Required. - :paramtype api_version_in_body: str + :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. + :paramtype ignored: str :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with MutableMapping :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse @@ -5099,23 +5161,9 @@ async def _list_secrets( # pylint: disable=protected-access cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) if body is _Unset: - if connection_name is _Unset: - raise TypeError("missing required argument: connection_name") - if subscription_id is _Unset: - raise TypeError("missing required argument: subscription_id") - if resource_group_name is _Unset: - raise TypeError("missing required argument: resource_group_name") - if workspace_name is _Unset: - raise TypeError("missing required argument: workspace_name") - if api_version_in_body is _Unset: - raise TypeError("missing required argument: api_version_in_body") - body = { - "apiVersionInBody": api_version_in_body, - "connectionName": connection_name, - "resourceGroupName": resource_group_name, - "subscriptionId": subscription_id, - "workspaceName": workspace_name, - } + if ignored is _Unset: + raise TypeError("missing required argument: ignored") + body = {"ignored": ignored} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -5125,7 +5173,7 @@ async def _list_secrets( # pylint: disable=protected-access _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_connections_list_secrets_request( - connection_name_in_url=connection_name_in_url, + connection_name=connection_name, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py index 0d7d924a2ac2..5f84899dcf9c 100644 --- 
a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py @@ -13,7 +13,7 @@ import logging import os import time -from typing import IO, Any, AsyncIterator, Dict, List, AsyncIterable, MutableMapping, Optional, Union, cast, overload +from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload from azure.ai.client import _types from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated @@ -35,9 +35,19 @@ class InferenceOperations: def __init__(self, outer_instance): self.outer_instance = outer_instance - async def get_chat_completions_client(self) -> "ChatCompletionsClient": + @distributed_trace_async + async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": + """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default + Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + + :return: An authenticated chat completions client + :rtype: ~azure.ai.inference.models.ChatCompletionsClient + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) connection = await self.outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, populate_secrets=True + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: raise ValueError("No serverless connection found") @@ -55,7 +65,9 @@ async def get_chat_completions_client(self) -> "ChatCompletionsClient": ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) + ) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( @@ -75,9 +87,19 @@ async def get_chat_completions_client(self) -> "ChatCompletionsClient": return client - async def get_embeddings_client(self) -> "EmbeddingsClient": + @distributed_trace_async + async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": + """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default + Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment. + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. 
+ + :return: An authenticated embeddings client + :rtype: ~azure.ai.inference.models.EmbeddingsClient + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) connection = await self.outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, populate_secrets=True + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: raise ValueError("No serverless connection found") @@ -101,7 +123,9 @@ async def get_embeddings_client(self) -> "EmbeddingsClient": logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) - client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.properties.token_credential) + client = EmbeddingsClient( + endpoint=connection.endpoint_url, credential=connection.properties.token_credential + ) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( @@ -113,9 +137,18 @@ return client - async def get_azure_openai_client(self) -> "AsyncAzureOpenAI": + @distributed_trace_async + async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI": + """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default + Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + + :return: An authenticated AsyncAzureOpenAI client + :rtype: ~openai.AsyncAzureOpenAI + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) connection = await self.outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs ) if not connection: raise ValueError("No Azure OpenAI connection found.") @@ -171,25 +204,56 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): - async def get_default(self, *, connection_type: ConnectionType, populate_secrets: bool = False) -> ConnectionProperties: + @distributed_trace_async + async def get_default( + self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any + ) -> ConnectionProperties: + """Get the properties of the default connection of a certain connection type, with or without + populating authentication credentials. + + :param connection_type: The connection type. Required. + :type connection_type: ~azure.ai.client.models._models.ConnectionType + :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+ :type with_credentials: bool + :return: The connection properties + :rtype: ~azure.ai.client.models._models.ConnectionProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) if not connection_type: raise ValueError("You must specify a connection type") - # Since there is no notion of service default at the moment, always return the first one - async for connection_properties in self.list(connection_type=connection_type, populate_secrets=populate_secrets): - return connection_properties - return None + # Since there is no notion of default connection at the moment, list all connections in the category + # and return the first one + connection_properties_list = await self.list(connection_type=connection_type, **kwargs) + if len(connection_properties_list) > 0: + if with_credentials: + return await self.get( + connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs + ) + else: + return connection_properties_list[0] + else: + return None - async def get(self, *, connection_name: str, populate_secrets: bool = False) -> ConnectionProperties: + @distributed_trace_async + async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + """Get the properties of a single connection, given its connection name, with or without + populating authentication credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :type with_credentials: bool + :return: The connection properties + :rtype: ~azure.ai.client.models._models.ConnectionProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Endpoint name cannot be empty") - if populate_secrets: + if with_credentials: connection: ConnectionsListSecretsResponse = await self._list_secrets( - connection_name_in_url=connection_name, - connection_name=connection_name, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - workspace_name=self._config.project_name, - api_version_in_body=self._config.api_version, + connection_name=connection_name, ignored="ignore", **kwargs ) if connection.properties.auth_type == AuthenticationType.AAD: return ConnectionProperties(connection=connection, token_credential=self._config.credential) @@ -208,26 +272,32 @@ async def get(self, *, connection_name: str, populate_secrets: bool = False) -> return ConnectionProperties(connection=connection) else: - internal_response: ConnectionsListResponse = await self._list() - for connection in internal_response.value: - if connection_name == connection.name: - return ConnectionProperties(connection=connection) - return None + return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs)) + @distributed_trace_async async def list( - self, *, connection_type: ConnectionType | None = None, populate_secrets: bool = False - ) -> AsyncIterable[ConnectionProperties]: - - # First make a REST call to /list to get all the connections, without secrets - connections_list: ConnectionsListResponse = await self._list() + self, *, connection_type: ConnectionType | None = None, **kwargs: Any + ) -> Iterable[ConnectionProperties]: + """List the properties of all connections, or all connections of a certain connection type.
+ + :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. + If not provided, all connections are listed. + :type connection_type: ~azure.ai.client.models._models.ConnectionType + :return: A list of connection properties + :rtype: Iterable[~azure.ai.client.models._models.ConnectionProperties] + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connections_list: ConnectionsListResponse = await self._list( + include_all=True, category=connection_type, **kwargs + ) - # Filter by connection type + # Iterate to create the simplified result property + connection_properties_list: List[ConnectionProperties] = [] for connection in connections_list.value: - if connection_type is None or connection.properties.category == connection_type: - if not populate_secrets: - yield ConnectionProperties(connection=connection) - else: - yield await self.get(connection_name=connection.name, populate_secrets=True) + connection_properties_list.append(ConnectionProperties(connection=connection)) + + return connection_properties_list class AgentsOperations(AgentsOperationsGenerated): @@ -338,7 +408,7 @@ async def create_agent( :paramtype description: str :keyword instructions: The system instructions for the new agent to use. Default value is None. :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). Default value is None. :paramtype toolset: ~azure.ai.client.models.ToolSet :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 @@ -412,7 +482,7 @@ async def create_agent( :param instructions: System instructions for the agent. :param tools: List of tools definitions for the agent. :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). :param temperature: Sampling temperature for generating agent responses. :param top_p: Nucleus sampling parameter. @@ -1402,7 +1472,7 @@ async def _handle_submit_tool_outputs( else: logger.warning("Toolset is not available in the client.") return - + logger.info(f"Tool outputs: {tool_outputs}") if tool_outputs: async with await self.submit_tool_outputs_to_stream( @@ -1440,7 +1510,9 @@ async def upload_file( """ @overload - async def upload_file(self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any) -> _models.OpenAIFile: + async def upload_file( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param file_path: Required. @@ -1760,7 +1832,13 @@ async def create_vector_store_and_poll( @overload async def create_vector_store_file_batch_and_poll( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. 
@@ -1788,7 +1866,7 @@ async def create_vector_store_file_batch_and_poll( content_type: str = "application/json", chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1812,7 +1890,13 @@ async def create_vector_store_file_batch_and_poll( @overload async def create_vector_store_file_batch_and_poll( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1840,7 +1924,7 @@ async def create_vector_store_file_batch_and_poll( file_ids: List[str] = _Unset, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1857,17 +1941,23 @@ async def create_vector_store_file_batch_and_poll( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - + if body is None: - vector_store_file_batch = await super().create_vector_store_file_batch(vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs) + vector_store_file_batch = await super().create_vector_store_file_batch( + vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs + ) else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file_batch = await super().create_vector_store_file_batch(body=body, content_type=content_type, **kwargs) - + content_type = kwargs.get("content_type", "application/json") + vector_store_file_batch = await super().create_vector_store_file_batch( + body=body, content_type=content_type, **kwargs + ) + while vector_store_file_batch.status == "in_progress": time.sleep(sleep_interval) - vector_store_file_batch = await super().get_vector_store_file_batch(vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id) - + vector_store_file_batch = await super().get_vector_store_file_batch( + vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id + ) + return vector_store_file_batch diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py index c48d08daeb5b..7ca731b7639b 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py @@ -150,6 +150,10 @@ class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Azure OpenAI service""" SERVERLESS = "Serverless" """Serverless API service""" + AZURE_BLOB_STORAGE = "AzureBlob" + """Azure Blob Storage""" + AI_SERVICES = "AIServices" + """Azure AI Services""" class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py index 1228a60103d1..c3e819b61daf 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py @@ -689,8 +689,8 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): :ivar auth_type: 
Authentication type of the connection target. Required. Entra ID authentication :vartype auth_type: str or ~azure.ai.client.models.AAD - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and - "Serverless". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str @@ -699,7 +699,8 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Entra ID authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" target: str = rest_field() """The connection URL to be used for this service. Required.""" @@ -710,8 +711,8 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey :ivar auth_type: Authentication type of the connection target. Required. API Key authentication :vartype auth_type: str or ~azure.ai.client.models.API_KEY - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and - "Serverless". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.client.models._models.CredentialsApiKeyAuth @@ -722,7 +723,8 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. API Key authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() @@ -736,8 +738,8 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): :ivar auth_type: Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication :vartype auth_type: str or ~azure.ai.client.models.SAS - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI" and - "Serverless". + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". :vartype category: str or ~azure.ai.client.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. 
:vartype credentials: ~azure.ai.client.models._models.CredentialsSASAuth @@ -749,7 +751,8 @@ class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\" and \"Serverless\".""" + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() @@ -801,12 +804,16 @@ class ConnectionsListSecretsResponse(_model_base.Model): """Response from the listSecrets operation. + :ivar id: A unique identifier for the connection. Required. + :vartype id: str :ivar name: The name of the resource. Required. :vartype name: str :ivar properties: The properties of the resource. Required. :vartype properties: ~azure.ai.client.models._models.ConnectionProperties """ + id: str = rest_field() + """A unique identifier for the connection. Required.""" name: str = rest_field() """The name of the resource. Required.""" properties: "_models._models.ConnectionProperties" = rest_field() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 3a5c1e56681c..7498e4e38be1 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -54,7 +54,9 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st """ new_params = {} valid_parameters = set( - filter(lambda x: not x.startswith('_') and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys()) + filter( + lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() + ) ) for k in filter(lambda x: x in valid_parameters, parameters.keys()): new_params[k] = parameters[k] @@ -75,8 +77,26 @@ def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: class ConnectionProperties: + """The properties of a single connection. + + :ivar id: A unique identifier for the connection. + :vartype id: str + :ivar name: The friendly name of the connection. + :vartype name: str + :ivar authentication_type: The authentication type used by the connection. + :vartype authentication_type: ~azure.ai.client.models._models.AuthenticationType + :ivar connection_type: The connection type . + :vartype connection_type: ~azure.ai.client.models._models.ConnectionType + :ivar endpoint_url: The endpoint URL associated with this connection + :vartype endpoint_url: str + :ivar key: The api-key to be used when accessing the connection. + :vartype key: str + :ivar token_credential: The TokenCredential to be used when accessing the connection. 
+ :vartype token_credential: ~azure.core.credentials.TokenCredential + """ def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: + self.id = connection.id self.name = connection.name self.authentication_type = connection.properties.auth_type self.connection_type = connection.properties.category @@ -94,15 +114,19 @@ def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credenti def __str__(self): out = "{\n" out += f' "name": "{self.name}",\n' + out += f' "id": "{self.id}",\n' out += f' "authentication_type": "{self.authentication_type}",\n' out += f' "connection_type": "{self.connection_type}",\n' out += f' "endpoint_url": "{self.endpoint_url}",\n' - out += f' "key": "{self.key}",\n' + if self.key: + out += f' "key": "{self.key}",\n' + else: + out += f' "key": null,\n' if self.token_credential: access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' else: - out += f' "token_credential": "null"\n' + out += f' "token_credential": null\n' out += "}\n" return out @@ -729,7 +753,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj =_safe_instantiate(RunStepDeltaChunk, parsed_data) + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: event_data_obj = parsed_data diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py index 5f738c262c38..a604b3ea24f9 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py @@ -1164,7 +1164,13 @@ def build_agents_list_vector_store_file_batch_files_request( # pylint: disable= return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_connections_list_request(**kwargs: Any) -> HttpRequest: +def build_connections_list_request( + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1176,6 +1182,12 @@ def build_connections_list_request(**kwargs: Any) -> HttpRequest: # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if category is not None: + _params["category"] = _SERIALIZER.query("category", category, "str") + if include_all is not None: + _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool") + if target is not None: + _params["target"] = _SERIALIZER.query("target", target, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -1183,7 +1195,31 @@ def build_connections_list_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_connections_list_secrets_request(connection_name_in_url: str, **kwargs: Any) -> HttpRequest: +def build_connections_get_request(connection_name: str, 
**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionName}" + path_format_arguments = { + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1192,9 +1228,9 @@ def build_connections_list_secrets_request(connection_name_in_url: str, **kwargs accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/connections/{connectionNameInUrl}/listsecrets" + _url = "/connections/{connectionName}/listsecrets" path_format_arguments = { - "connectionNameInUrl": _SERIALIZER.url("connection_name_in_url", connection_name_in_url, "str"), + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), } _url: str = _url.format(**path_format_arguments) # type: ignore @@ -6278,9 +6314,24 @@ def __init__(self, *args, **kwargs): self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # pylint: disable=protected-access + def _list( # pylint: disable=protected-access + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: """List the details of all the connections (not including their credentials). + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.client.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str :return: ConnectionsListResponse. 
The ConnectionsListResponse is compatible with MutableMapping :rtype: ~azure.ai.client.models._models.ConnectionsListResponse :raises ~azure.core.exceptions.HttpResponseError: @@ -6299,6 +6350,9 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6341,58 +6395,101 @@ def _list(self, **kwargs: Any) -> _models._models.ConnectionsListResponse: # py return deserialized # type: ignore + @distributed_trace + def _get( # pylint: disable=protected-access + self, connection_name: str, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + @overload def _list_secrets( # pylint: disable=protected-access - self, connection_name_in_url: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... 
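A minimal sketch of how the new optional filters serialize onto the query string, assuming the standard `_SERIALIZER` encoding used by the generated code above (booleans render lowercase); the builder is internal and the values below are placeholders:

```python
# Illustrative call against the internal request builder shown above.
from azure.ai.client.operations._operations import build_connections_list_request

request = build_connections_list_request(
    category="AzureOpenAI",                       # -> ?category=AzureOpenAI
    include_all=True,                             # -> &includeAll=true
    target="https://example.openai.azure.com",    # -> &target=...
)

# Expected relative URL, before the client substitutes the endpoint path:
# /connections?api-version=2024-07-01-preview&category=AzureOpenAI&includeAll=true&target=...
print(request.method, request.url)
```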
@overload def _list_secrets( # pylint: disable=protected-access - self, - connection_name_in_url: str, - *, - connection_name: str, - subscription_id: str, - resource_group_name: str, - workspace_name: str, - api_version_in_body: str, - content_type: str = "application/json", - **kwargs: Any + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... @overload def _list_secrets( # pylint: disable=protected-access - self, connection_name_in_url: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: ... @distributed_trace def _list_secrets( # pylint: disable=protected-access - self, - connection_name_in_url: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - connection_name: str = _Unset, - subscription_id: str = _Unset, - resource_group_name: str = _Unset, - workspace_name: str = _Unset, - api_version_in_body: str = _Unset, - **kwargs: Any + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credential (if available). + """Get the details of a single connection, including credentials (if available). - :param connection_name_in_url: Connection Name. Required. - :type connection_name_in_url: str + :param connection_name: Connection Name. Required. + :type connection_name: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword connection_name: Connection Name (should be the same as the connection name in the URL - path). Required. - :paramtype connection_name: str - :keyword subscription_id: The ID of the target subscription. Required. - :paramtype subscription_id: str - :keyword resource_group_name: The name of the Resource Group. Required. - :paramtype resource_group_name: str - :keyword workspace_name: The name of the workspace (Azure AI Studio hub). Required. - :paramtype workspace_name: str - :keyword api_version_in_body: The api version. Required. - :paramtype api_version_in_body: str + :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. + :paramtype ignored: str :return: ConnectionsListSecretsResponse. 
The ConnectionsListSecretsResponse is compatible with MutableMapping :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse @@ -6413,23 +6510,9 @@ def _list_secrets( # pylint: disable=protected-access cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) if body is _Unset: - if connection_name is _Unset: - raise TypeError("missing required argument: connection_name") - if subscription_id is _Unset: - raise TypeError("missing required argument: subscription_id") - if resource_group_name is _Unset: - raise TypeError("missing required argument: resource_group_name") - if workspace_name is _Unset: - raise TypeError("missing required argument: workspace_name") - if api_version_in_body is _Unset: - raise TypeError("missing required argument: api_version_in_body") - body = { - "apiVersionInBody": api_version_in_body, - "connectionName": connection_name, - "resourceGroupName": resource_group_name, - "subscriptionId": subscription_id, - "workspaceName": workspace_name, - } + if ignored is _Unset: + raise TypeError("missing required argument: ignored") + body = {"ignored": ignored} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -6439,7 +6522,7 @@ def _list_secrets( # pylint: disable=protected-access _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_connections_list_secrets_request( - connection_name_in_url=connection_name_in_url, + connection_name=connection_name, content_type=content_type, api_version=self._config.api_version, content=_content, diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py index 41cb15216239..b1ffba2950c4 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py @@ -1,6 +1,5 @@ # pylint: disable=too-many-lines # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -46,9 +45,19 @@ class InferenceOperations: def __init__(self, outer_instance): self.outer_instance = outer_instance - def get_chat_completions_client(self) -> "ChatCompletionsClient": + @distributed_trace + def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": + """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default + Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. + The package `azure-ai-inference` must be installed prior to calling this method. 
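A hedged usage sketch of what this helper enables, assuming the client exposes `InferenceOperations` as `ai_client.inference` and that the project has a Serverless connection with a chat model deployed:

```python
import os

from azure.ai.client import AzureAIClient
from azure.identity import DefaultAzureCredential

ai_client = AzureAIClient.from_connection_string(
    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
)

# Raises ModuleNotFoundError if azure-ai-inference is not installed.
chat_client = ai_client.inference.get_chat_completions_client()
response = chat_client.complete(
    messages=[{"role": "user", "content": "How many feet are in a mile?"}]
)
print(response.choices[0].message.content)
```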
+
+        :return: An authenticated chat completions client
+        :rtype: ~azure.ai.inference.ChatCompletionsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
         connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, populate_secrets=True
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
         )
         if not connection:
             raise ValueError("No serverless connection found")
@@ -66,7 +75,9 @@ def get_chat_completions_client(self) -> "ChatCompletionsClient":
             )
             from azure.core.credentials import AzureKeyCredential

-            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
         elif connection.authentication_type == AuthenticationType.AAD:
             # MaaS models do not yet support EntraID auth
             logger.debug(
@@ -86,9 +97,19 @@ def get_chat_completions_client(self) -> "ChatCompletionsClient":

         return client

-    def get_embeddings_client(self) -> "EmbeddingsClient":
+    @distributed_trace
+    def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
+        """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
+        The package `azure-ai-inference` must be installed prior to calling this method.
+
+        :return: An authenticated embeddings client
+        :rtype: ~azure.ai.inference.EmbeddingsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
         connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, populate_secrets=True
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
         )
         if not connection:
             raise ValueError("No serverless connection found")
@@ -106,13 +127,17 @@ def get_embeddings_client(self) -> "EmbeddingsClient":
             )
             from azure.core.credentials import AzureKeyCredential

-            client = EmbeddingsClient(endpoint=connection.authentication_type, credential=AzureKeyCredential(connection.key))
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
         elif connection.authentication_type == AuthenticationType.AAD:
             # MaaS models do not yet support EntraID auth
             logger.debug(
                 "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
             )
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.properties.token_credential)
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+            )
         elif connection.authentication_type == AuthenticationType.SAS:
             # TODO - Not yet supported by the service. Expected 9/27.
             logger.debug(
@@ -124,9 +149,18 @@ def get_embeddings_client(self) -> "EmbeddingsClient":

         return client

-    def get_azure_openai_client(self) -> "AzureOpenAI":
+    @distributed_trace
+    def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI":
+        """Get an authenticated AzureOpenAI client (from the `openai` package) for the default
+        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
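The corresponding sketch for the Azure OpenAI helper, reusing `ai_client` from the previous sketch; the model name below is a placeholder for your own Azure OpenAI deployment:

```python
# Requires `pip install openai`; the helper resolves the endpoint and credential
# from the project's default Azure OpenAI connection.
aoai_client = ai_client.inference.get_azure_openai_client()
response = aoai_client.chat.completions.create(
    model="gpt-4-0613",  # placeholder: use your Azure OpenAI deployment name
    messages=[{"role": "user", "content": "Write one sentence about the sea."}],
)
print(response.choices[0].message.content)
```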
+
+        :return: An authenticated AzureOpenAI client
+        :rtype: ~openai.AzureOpenAI
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
         connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True
+            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
         )
         if not connection:
             raise ValueError("No Azure OpenAI connection found")
@@ -182,27 +216,56 @@


 class ConnectionsOperations(ConnectionsOperationsGenerated):

-    def get_default(self, *, connection_type: ConnectionType, populate_secrets: bool = False) -> ConnectionProperties:
+    @distributed_trace
+    def get_default(
+        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
+    ) -> ConnectionProperties:
+        """Get the properties of the default connection of a certain connection type, with or without
+        populating authentication credentials.
+
+        :param connection_type: The connection type. Required.
+        :type connection_type: ~azure.ai.client.models._models.ConnectionType
+        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+        :type with_credentials: bool
+        :return: The connection properties
+        :rtype: ~azure.ai.client.models._models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
         if not connection_type:
             raise ValueError("You must specify a connection type")
-        connection_properties_list = self.list(connection_type=connection_type, populate_secrets=populate_secrets)
-        # Since there is no notion of service default at the moment, always return the first one
+        # Since there is no notion of default connection at the moment, list all connections in the category
+        # and return the first one
+        connection_properties_list = self.list(connection_type=connection_type, **kwargs)
         if len(connection_properties_list) > 0:
-            return connection_properties_list[0]
+            if with_credentials:
+                return self.get(
+                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
+                )
+            else:
+                return connection_properties_list[0]
         else:
             return None

-    def get(self, *, connection_name: str, populate_secrets: bool = False) -> ConnectionProperties:
+    @distributed_trace
+    def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
+        """Get the properties of a single connection, given its connection name, with or without
+        populating authentication credentials.
+
+        :param connection_name: Connection Name. Required.
+        :type connection_name: str
+        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+ :type with_credentials: bool + :return: The connection properties + :rtype: ~azure.ai.client.models._models.ConnectionProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Connection name cannot be empty") - if populate_secrets: + if with_credentials: connection: ConnectionsListSecretsResponse = self._list_secrets( - connection_name_in_url=connection_name, - connection_name=connection_name, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - workspace_name=self._config.project_name, - api_version_in_body=self._config.api_version, + connection_name=connection_name, ignored="ignore", **kwargs ) if connection.properties.auth_type == AuthenticationType.AAD: return ConnectionProperties(connection=connection, token_credential=self._config.credential) @@ -221,27 +284,26 @@ def get(self, *, connection_name: str, populate_secrets: bool = False) -> Connec return ConnectionProperties(connection=connection) else: - internal_response: ConnectionsListResponse = self._list() - for connection in internal_response.value: - if connection_name == connection.name: - return ConnectionProperties(connection=connection) - return None + return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs)) - def list( - self, *, connection_type: ConnectionType | None = None, populate_secrets: bool = False - ) -> Iterable[ConnectionProperties]: + @distributed_trace + def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) -> Iterable[ConnectionProperties]: + """List the properties of all connections, or all connections of a certain connection type. + + :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. + If not provided, all connections are listed. + :type connection_type: ~azure.ai.client.models._models.ConnectionType + :return: A list of connection properties + :rtype: Iterable[~azure.ai.client.models._models.ConnectionProperties] + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs) - # First make a REST call to /list to get all the connections, without secrets - connections_list: ConnectionsListResponse = self._list() + # Iterate to create the simplified result property connection_properties_list: List[ConnectionProperties] = [] - - # Filter by connection type for connection in connections_list.value: - if connection_type is None or connection.properties.category == connection_type: - if not populate_secrets: - connection_properties_list.append(ConnectionProperties(connection=connection)) - else: - connection_properties_list.append(self.get(connection_name=connection.name, populate_secrets=True)) + connection_properties_list.append(ConnectionProperties(connection=connection)) return connection_properties_list @@ -1453,7 +1515,9 @@ def upload_file( """ @overload - def upload_file(self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any) -> _models.OpenAIFile: + def upload_file( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param file_path: Required. 
@@ -1773,7 +1837,13 @@ def create_vector_store_and_poll( @overload def create_vector_store_file_batch_and_poll( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1801,7 +1871,7 @@ def create_vector_store_file_batch_and_poll( content_type: str = "application/json", chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1825,7 +1895,13 @@ def create_vector_store_file_batch_and_poll( @overload def create_vector_store_file_batch_and_poll( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1853,7 +1929,7 @@ def create_vector_store_file_batch_and_poll( file_ids: List[str] = _Unset, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, sleep_interval: float = 1, - **kwargs: Any + **kwargs: Any, ) -> _models.VectorStoreFileBatch: """Create a vector store file batch and poll. @@ -1870,19 +1946,25 @@ def create_vector_store_file_batch_and_poll( :rtype: ~azure.ai.client.models.VectorStoreFileBatch :raises ~azure.core.exceptions.HttpResponseError: """ - + if body is None: - vector_store_file_batch = super().create_vector_store_file_batch(vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs) + vector_store_file_batch = super().create_vector_store_file_batch( + vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs + ) else: - content_type = kwargs.get("content_type", "application/json") - vector_store_file_batch = super().create_vector_store_file_batch(body=body, content_type=content_type, **kwargs) - + content_type = kwargs.get("content_type", "application/json") + vector_store_file_batch = super().create_vector_store_file_batch( + body=body, content_type=content_type, **kwargs + ) + while vector_store_file_batch.status == "in_progress": time.sleep(sleep_interval) - vector_store_file_batch = super().get_vector_store_file_batch(vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id) - + vector_store_file_batch = super().get_vector_store_file_batch( + vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id + ) + return vector_store_file_batch - + __all__: List[str] = [ "AgentsOperations", diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 79981d834f90..c04468049742 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -34,45 +34,49 @@ async def main(): # Customer needs to login to Azure subscription via Azure CLI and set the environment variables 
     ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
     )

     async with ai_client:
-
+
         # upload a file and wait for it to be processed
-        file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
+        file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
         print(f"Uploaded file, file ID: {file.id}")

         # create a vector store with no file and wait for it to be processed
         vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
         print(f"Created vector store, vector store ID: {vector_store.id}")
-
+
         # add the file to the vector store or you can supply file ids in the vector store creation
-        vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id])
+        vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll(
+            vector_store_id=vector_store.id, file_ids=[file.id]
+        )
         print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
-
+
         # create a file search tool
         file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
-
+
         # note that FileSearchTool must be added as a tool and tool_resources must be supplied, or the assistant will be unable to search the file
         agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant",
+            model="gpt-4-1106-preview",
+            name="my-assistant",
             instructions="You are a helpful assistant",
             tools=file_search_tool.definitions,
-            tool_resources=file_search_tool.resources
+            tool_resources=file_search_tool.resources,
         )
         print(f"Created agent, agent ID: {agent.id}")

         thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
+        print(f"Created thread, thread ID: {thread.id}")

-        message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
+        message = await ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+ ) print(f"Created message, message ID: {message.id}") run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") - + await ai_client.agents.delete_file(file.id) print("Deleted file") @@ -81,9 +85,10 @@ async def main(): await ai_client.agents.delete_agent(agent.id) print("Deleted agent") - - messages = await ai_client.agents.list_messages(thread_id=thread.id) + + messages = await ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") + if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 0a18f046b551..6381168e1d12 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -41,9 +41,7 @@ async def main(): # upload a file and wait for it to be processed async with ai_client: - file = await ai_client.agents.upload_file_and_poll( - file_path="../product_info_1.md", purpose=FilePurpose.AGENTS - ) + file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) # Create agent with file search tool agent = await ai_client.agents.create_agent( diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 77b3796044f3..5dea7d60b6be 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -41,6 +41,7 @@ credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) + class MyEventHandler(AgentEventHandler): def __init__(self, functions: FunctionTool) -> None: diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 3765362d4c78..b02eac59906d 100644 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -44,6 +44,7 @@ credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) + # When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream # method and functions gets automatically called by default. 
 class MyEventHandler(AgentEventHandler):
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py
index cfe2167697ae..f28c20ffab50 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py
@@ -39,6 +39,7 @@
     credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
 )

+
 # Function to handle tool stream iteration
 def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs):
     try:
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
index c5f0ac29ae4e..d40ecf5d0fe7 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
@@ -32,45 +32,49 @@
 # Customer needs to login to Azure subscription via Azure CLI and set the environment variables

 ai_client = AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
 )

 with ai_client:
-
+
     # upload a file and wait for it to be processed
-    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
     print(f"Uploaded file, file ID: {file.id}")

     # create a vector store with no file and wait for it to be processed
     vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
     print(f"Created vector store, vector store ID: {vector_store.id}")
-
+
     # add the file to the vector store or you can supply file ids in the vector store creation
-    vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(vector_store_id=vector_store.id, file_ids=[file.id])
+    vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(
+        vector_store_id=vector_store.id, file_ids=[file.id]
+    )
     print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
-
+
     # create a file search tool
     file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
-
+
     # note that FileSearchTool must be added as a tool and tool_resources must be supplied, or the assistant will be unable to search the file
     agent = ai_client.agents.create_agent(
-        model="gpt-4-1106-preview", name="my-assistant",
+        model="gpt-4-1106-preview",
+        name="my-assistant",
         instructions="You are a helpful assistant",
         tools=file_search_tool.definitions,
-        tool_resources=file_search_tool.resources
+        tool_resources=file_search_tool.resources,
     )
     print(f"Created agent, agent ID: {agent.id}")

     thread = ai_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
+    print(f"Created thread, thread ID: {thread.id}")

-    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
+    message = ai_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+ ) print(f"Created message, message ID: {message.id}") run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") - + ai_client.agents.delete_file(file.id) print("Deleted file") @@ -79,6 +83,6 @@ ai_client.agents.delete_agent(agent.id) print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) + + messages = ai_client.agents.list_messages(thread_id=thread.id) print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py index f1950b6ed07e..23644a3cdb14 100644 --- a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py @@ -27,10 +27,9 @@ from azure.ai.client.models import ConnectionType, AuthenticationType from azure.identity import DefaultAzureCredential + async def sample_connections_async(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. - # It should be in the format ";;;" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], @@ -38,35 +37,36 @@ async def sample_connections_async(): async with ai_client: - # List all connections: - print("====> Listing of all connections:") - async for connection in ai_client.connections.list(): + # List the properties of all connections + connections = await ai_client.connections.list() + print(f"====> Listing of all connections (found {len(connections)}):") + for connection in connections: print(connection) - # List all connections of a particular "type", with or without their credentials: - print("====> Listing of all Azure Open AI connections:") - async for connection in ai_client.connections.list( - connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True, # Optional. Defaults to "False" - ): + # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections) + connections = await ai_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, + ) + print("====> Listing of all Azure Open AI connections (found {len(connections)}):") + for connection in connections: print(connection) - # Get the default connection of a particular "type": + # Get the properties of the default connection of a particular "type", with credentials connection = await ai_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, - populate_secrets=True, # Required. # Optional. Defaults to "False" + with_credentials=True, # Optional. Defaults to "False" ) print("====> Get default Azure Open AI connection:") print(connection) - # Get a connection by its name: + # Get the properties of a connection by connection name: connection = await ai_client.connections.get( - connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. + connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], + with_credentials=True, # Optional. 
Defaults to "False" ) print("====> Get connection by name:") print(connection) - # Examples of how you would create Inference client if connection.connection_type == ConnectionType.AZURE_OPEN_AI: @@ -95,7 +95,7 @@ async def sample_connections_async(): raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = await client.chat.completions.create( - model="gpt-4o", + model="gpt-4-0613", messages=[ { "role": "user", @@ -114,7 +114,9 @@ async def sample_connections_async(): print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) + ) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") diff --git a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py index 0ed10ac77017..a5ce848c110b 100644 --- a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py @@ -30,44 +30,42 @@ from azure.identity import DefaultAzureCredential, get_bearer_token_provider from azure.core.credentials import AzureKeyCredential -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# It should be in the format ";;;" ai_client = AzureAIClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) with ai_client: - - # List all connections + # List the properties of all connections connections = ai_client.connections.list() print(f"====> Listing of all connections (found {len(connections)}):") for connection in connections: print(connection) - # List all connections of a particular "type", with or without their credentials: + # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections) connections = ai_client.connections.list( - connection_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True, # Optional. Defaults to "False" + connection_type=ConnectionType.AZURE_OPEN_AI, ) print("====> Listing of all Azure Open AI connections (found {len(connections)}):") for connection in connections: print(connection) - # Get the default connection of a particular "type": + # Get the properties of the default connection of a particular "type", with credentials connection = ai_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, populate_secrets=True # Required. # Optional. Defaults to "False" + connection_type=ConnectionType.AZURE_OPEN_AI, + with_credentials=True, # Optional. Defaults to "False" ) print("====> Get default Azure Open AI connection:") print(connection) - - # Get a connection by name: + + # Get the properties of a connection by connection name: connection = ai_client.connections.get( - connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], populate_secrets=True # Required. + connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], with_credentials=True # Optional. 
Defaults to "False" ) print("====> Get connection by name:") print(connection) + # Examples of how you would create Inference client if connection.connection_type == ConnectionType.AZURE_OPEN_AI: @@ -92,7 +90,7 @@ raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = client.chat.completions.create( - model="gpt-4o", + model="gpt-4-0613", messages=[ { "role": "user", @@ -111,7 +109,9 @@ elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.properties.token_credential) + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=connection.properties.token_credential + ) else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py index 7361c8d0f4f0..30862aa8c879 100644 --- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py +++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py @@ -118,61 +118,66 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc - @pytest.mark.parametrize( - 'valid_params,model_cls', + "valid_params,model_cls", [ - ({ - 'id': '12345', - 'object': "thread.run", - 'thread_id': "6789", - 'assistant_id': "101112", - 'status': 'in_progress', - 'required_action': 'test', - 'last_error': 'none', - 'model': 'gpt-4', - 'instructions': "Test instruction", - 'tools': 'Test function', - 'created_at': datetime.datetime(2024, 11, 14), - 'expires_at': datetime.datetime(2024, 11, 17), - 'started_at': datetime.datetime(2024, 11, 15), - 'completed_at': datetime.datetime(2024, 11, 16), - 'cancelled_at': datetime.datetime(2024, 11, 16), - 'failed_at': datetime.datetime(2024, 11, 16), - 'incomplete_details': 'max_completion_tokens', - 'usage': 'in_progress', - 'temperature': 1.0, - 'top_p': 1.0, - 'max_completion_tokens': 1000, - 'truncation_strategy': 'test', - 'tool_choice': "tool name", - 'response_format': "json", - 'metadata': {'foo': 'bar'}, - 'tool_resources': "test", - 'parallel_tool_calls': True - }, ThreadRun), - ({ - 'id': '1233', - 'object': 'thread.message', - 'created_at': datetime.datetime(2024, 11, 14), - 'thread_id': '5678', - 'status': 'incomplete', - 'incomplete_details': "test", - 'completed_at': datetime.datetime(2024, 11, 16), - 'incomplete_at': datetime.datetime(2024, 11, 16), - 'role': 'assistant', - 'content': 'Test', - 'assistant_id': '9911', - 'run_id': '11', - 'attachments': ['4', '8', '15', '16', '23', '42'], - 'metadata': {'foo', 'bar'} - }, ThreadMessage) - ] + ( + { + "id": "12345", + "object": "thread.run", + "thread_id": "6789", + "assistant_id": "101112", + "status": "in_progress", + "required_action": "test", + "last_error": "none", + "model": "gpt-4", + "instructions": "Test instruction", + "tools": "Test function", + "created_at": datetime.datetime(2024, 11, 14), + "expires_at": datetime.datetime(2024, 11, 17), + "started_at": datetime.datetime(2024, 11, 15), + "completed_at": datetime.datetime(2024, 11, 16), + "cancelled_at": datetime.datetime(2024, 11, 16), + "failed_at": datetime.datetime(2024, 11, 16), + "incomplete_details": "max_completion_tokens", + "usage": 
"in_progress", + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": 1000, + "truncation_strategy": "test", + "tool_choice": "tool name", + "response_format": "json", + "metadata": {"foo": "bar"}, + "tool_resources": "test", + "parallel_tool_calls": True, + }, + ThreadRun, + ), + ( + { + "id": "1233", + "object": "thread.message", + "created_at": datetime.datetime(2024, 11, 14), + "thread_id": "5678", + "status": "incomplete", + "incomplete_details": "test", + "completed_at": datetime.datetime(2024, 11, 16), + "incomplete_at": datetime.datetime(2024, 11, 16), + "role": "assistant", + "content": "Test", + "assistant_id": "9911", + "run_id": "11", + "attachments": ["4", "8", "15", "16", "23", "42"], + "metadata": {"foo", "bar"}, + }, + ThreadMessage, + ), + ], ) def test_correct_thread_params(self, valid_params, model_cls): """Test that if service returned extra parameter in SSE response, it does not create issues.""" - - bad_params = {'foo': 'bar'} + + bad_params = {"foo": "bar"} params = copy.deepcopy(valid_params) params.update(bad_params) # We should bot e able to create Thread Run with bad parameters. @@ -187,7 +192,6 @@ def test_correct_thread_params(self, valid_params, model_cls): model_cls(**filtered_params) # Check safe initialization. assert isinstance(_safe_instantiate(model_cls, params), model_cls) - def test_safe_instantiate_non_dict(self): """Test that safe_instantiate method when user supplies not a dictionary.""" diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml index df07cabfc8cf..4b3b80fb4e8a 100644 --- a/sdk/ai/azure-ai-client/tsp-location.yaml +++ b/sdk/ai/azure-ai-client/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Client -commit: 5e47aaf15376915f221a0527f344733ce7f924cb +commit: 0c04c10b19c71ca88bcfe42015d4de9ad027764b repo: Azure/azure-rest-api-specs additionalDirectories: From 065849b9a5e933afa88910f79bbb01afffbe2079 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:50:14 -0700 Subject: [PATCH 042/138] Users/singankit/ai project utils (#38033) * Sample upload file method on AI Client * Updating sample evaluation * Improving Evaluation user experience * Adding pip install link for evaluation * Update sample_evaluations.py * Reverting connection changes * Update _patch.py --- .../azure-ai-client/azure/ai/client/_patch.py | 48 ++++++- .../azure/ai/client/models/_patch.py | 25 +++- .../evaluations/evaluate_test_data.jsonl | 3 + .../samples/evaluations/sample_evaluations.py | 120 ++++++++---------- 4 files changed, 129 insertions(+), 67 deletions(-) create mode 100644 sdk/ai/azure-ai-client/samples/evaluations/evaluate_test_data.jsonl diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py index b0add1c9c19b..3ca93bf89165 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py @@ -6,7 +6,10 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List, Any +import uuid +from os import PathLike +from pathlib import Path +from typing import List, Any, Union, Dict from typing_extensions import Self from azure.core.credentials import TokenCredential from azure.core import PipelineClient @@ -185,6 +188,49 @@ def from_connection_string(cls, conn_str: str, credential: "TokenCredential", ** project_name = parts[3] return cls(endpoint, subscription_id, 
+    def upload_file(self, file_path: Union[Path, str, PathLike]) -> str:
+        """Upload a file to the Azure AI Studio project.
+        This method requires *azure-ai-ml* to be installed.
+
+        :param file_path: The path to the file to upload.
+        :type file_path: Union[str, Path, PathLike]
+        :return: The asset id of the uploaded file.
+        :rtype: str
+        """
+        try:
+            from azure.ai.ml import MLClient
+            from azure.ai.ml.entities import Data
+            from azure.ai.ml.constants import AssetTypes
+        except ImportError:
+            raise ImportError(
+                "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`")
+
+        data = Data(
+            path=file_path,
+            type=AssetTypes.URI_FILE,
+            name=str(uuid.uuid4()),  # generating random name
+            is_anonymous=True,
+            version="1",
+        )
+
+        ml_client = MLClient(
+            self._config3.credential,
+            self._config3.subscription_id,
+            self._config3.resource_group_name,
+            self._config3.project_name,
+        )
+
+        data_asset = ml_client.data.create_or_update(data)
+
+        return data_asset.id
+
+    @property
+    def scope(self) -> Dict[str, str]:
+        return {
+            "subscription_id": self._config3.subscription_id,
+            "resource_group_name": self._config3.resource_group_name,
+            "project_name": self._config3.project_name,
+        }

 __all__: List[str] = [
     "AzureAIClient",
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
index 7498e4e38be1..fbc689a7b5cd 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
@@ -15,7 +15,7 @@
 from azure.core.credentials import TokenCredential, AccessToken

-from ._enums import AgentStreamEvent
+from ._enums import AgentStreamEvent, ConnectionType
 from ._models import (
     ConnectionsListSecretsResponse,
     MessageDeltaChunk,
@@ -33,6 +33,7 @@
     CodeInterpreterToolDefinition,
     CodeInterpreterToolResource,
     RequiredFunctionToolCall,
+    ConnectionType,
 )
 from abc import ABC, abstractmethod
@@ -111,6 +112,28 @@ def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credenti
         self.key = connection.properties.credentials.key
         self.token_credential = token_credential
Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."} +{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} +{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."} diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py index 5f30c2717dda..63230a4ce8e8 100644 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py @@ -1,48 +1,72 @@ -import os -from pprint import pprint +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
-from azure.ai.client import AzureAIClient
-from azure.identity import DefaultAzureCredential
+"""
+FILE: sample_evaluations.py
+
+DESCRIPTION:
+    This sample demonstrates how to create and retrieve a remote evaluation
+    using a synchronous client.
+
+USAGE:
+    python sample_evaluations.py
+
+    Before running the sample:
-from azure.ai.client.models import Evaluation, Dataset, EvaluatorConfiguration
+    pip install azure-identity
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/ai_project_utils#egg=azure-ai-client&subdirectory=sdk/ai/azure-ai-client"
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/demo_evaluators_id#egg=azure-ai-evaluation&subdirectory=sdk/evaluation/azure-ai-evaluation"
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
-# Project Configuration Canary
-Subscription = "2d385bf4-0756-4a76-aa95-28bf9ed3b625"
-ResourceGroup = "rg-anksingai"
-Workspace = "anksing-canary"
-DataUri = "azureml://locations/eastus2euap/workspaces/a51c1ea7-5c29-4c32-a98e-7fa752f36e7c/data/test-remote-eval-data/versions/1"
-Endpoint = "https://eastus2euap.api.azureml.ms"
+import os, time
+from azure.ai.client import AzureAIClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.client.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType
+from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, HateUnfairnessEvaluator
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-# Create an Azure AI client
 ai_client = AzureAIClient.from_connection_string(
     credential=DefaultAzureCredential(),
-    conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}",
-    logging_enable=True,  # Optional.
Remove this line if you don't want to show how to enable logging + conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) +# Upload data for evaluation +# Service side fix needed to make this work +# data_id = ai_client.upload_file("./evaluate_test_data.jsonl") +data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3" + +default_connection = ai_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) + + + # Create an evaluation evaluation = Evaluation( display_name="Remote Evaluation", description="Evaluation of dataset", - data=Dataset(id=DataUri), + data=Dataset(id=data_id), evaluators={ "f1_score": EvaluatorConfiguration( - id="azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1" + id=F1ScoreEvaluator.evaluator_id, ), "relevance": EvaluatorConfiguration( - id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2", + id=RelevanceEvaluator.evaluator_id, + init_params={ + "model_config": default_connection.to_evaluator_model_config(deployment_name="GPT-4-Prod", api_version="2024-08-01-preview") + }, + ), + "hate_unfairness": EvaluatorConfiguration( + id=HateUnfairnessEvaluator.evaluator_id, init_params={ - "model_config": { - "api_key": "/subscriptions/2d385bf4-0756-4a76-aa95-28bf9ed3b625/resourceGroups/rg-anksingai/providers/Microsoft.MachineLearningServices/workspaces/anksing-canary/connections/ai-anksingai0771286510468288/credentials/key", - "azure_deployment": "gpt-4", - "api_version": "2023-07-01-preview", - "azure_endpoint": "https://ai-anksingai0771286510468288.openai.azure.com/", - } + "azure_ai_project": ai_client.scope }, - # data_mapping= { - # "response": "${data.answer}", "query": "${data.question}" - # } ), }, # This is needed as a workaround until environment gets published to registry @@ -54,45 +78,11 @@ evaluation=evaluation, ) -pprint(evaluation_response.as_dict()) - # Get evaluation get_evaluation_response = ai_client.evaluations.get(evaluation_response.id) -pprint(get_evaluation_response.as_dict()) - - -evaluation_json = { - "Data": {"Uri": DataUri}, - "DisplayName": "Remote Evaluation", - "Description": "Testing", - # "Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/2", - "Evaluators": { - "f1_score": {"Id": "azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1"}, - "relevance": { - "Id": "azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/1", - "initParams": { - "model_config": { - "api_key": "/subscriptions/2d385bf4-0756-4a76-aa95-28bf9ed3b625/resourceGroups/rg-anksingai/providers/Microsoft.MachineLearningServices/workspaces/anksing-canary/connections/ai-anksingaicanary931822963616_aoai/credentials/key", - "azure_deployment": "gpt-4", - "api_version": "2023-07-01-preview", - "azure_endpoint": "https://ai-anksingaicanary931822963616.openai.azure.com/", - } - }, - }, - "hate_unfairness": { - "Id": "azureml://registries/jamahaja-evals-registry/models/HateUnfairnessEvaluator/versions/2", - "initParams": { - "azure_ai_project": { - "subscription_id": "2d385bf4-0756-4a76-aa95-28bf9ed3b625", - "resource_group_name": "rg-anksingai", - "workspace_name": "anksing-canary", - } - }, - }, - }, - "properties": { - "Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6", - # "_azureml.evaluation_run": "promptflow.BatchRun" - }, -} 
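For long-running evaluations, a hedged polling sketch on top of `evaluations.get`; it reuses `ai_client` and `evaluation_response` from the sample, and the terminal status strings are assumptions that may differ on the service side:

```python
import time

# Poll until the evaluation reaches a terminal state; status literals below
# are illustrative assumptions, not confirmed service values.
evaluation = ai_client.evaluations.get(evaluation_response.id)
while evaluation.status not in ("Completed", "Failed", "Canceled"):
    time.sleep(15)  # poll at a gentle interval
    evaluation = ai_client.evaluations.get(evaluation_response.id)
print("Terminal evaluation status:", evaluation.status)
```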
+print("----------------------------------------------------------------") +print("Created evaluation, evaluation ID: ", get_evaluation_response.id) +print("Evaluation status: ", get_evaluation_response.status) +print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) +print("----------------------------------------------------------------") From 99e8ecdd1023e8a950ffd130cd820a9977090cb4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:29:52 -0700 Subject: [PATCH 043/138] First emit of azure-ai-project (_patch.py, samples and test not yet copied) --- sdk/ai/azure-ai-project/CHANGELOG.md | 5 + sdk/ai/azure-ai-project/LICENSE | 21 + sdk/ai/azure-ai-project/MANIFEST.in | 7 + sdk/ai/azure-ai-project/README.md | 80 + sdk/ai/azure-ai-project/azure/__init__.py | 1 + sdk/ai/azure-ai-project/azure/ai/__init__.py | 1 + .../azure/ai/project/__init__.py | 32 + .../azure/ai/project/_client.py | 137 + .../azure/ai/project/_configuration.py | 91 + .../azure/ai/project/_model_base.py | 1159 +++ .../azure/ai/project/_patch.py | 20 + .../azure/ai/project/_serialization.py | 2114 +++++ .../azure/ai/project/_types.py | 18 + .../azure/ai/project/_vendor.py | 50 + .../azure/ai/project/_version.py | 9 + .../azure/ai/project/aio/__init__.py | 29 + .../azure/ai/project/aio/_client.py | 139 + .../azure/ai/project/aio/_configuration.py | 91 + .../azure/ai/project/aio/_patch.py | 20 + .../ai/project/aio/operations/__init__.py | 29 + .../ai/project/aio/operations/_operations.py | 6049 ++++++++++++++ .../azure/ai/project/aio/operations/_patch.py | 20 + .../azure/ai/project/models/__init__.py | 376 + .../azure/ai/project/models/_enums.py | 513 ++ .../azure/ai/project/models/_models.py | 6105 ++++++++++++++ .../azure/ai/project/models/_patch.py | 20 + .../azure/ai/project/operations/__init__.py | 29 + .../ai/project/operations/_operations.py | 7396 +++++++++++++++++ .../azure/ai/project/operations/_patch.py | 20 + .../azure/ai/project/py.typed | 1 + sdk/ai/azure-ai-project/dev_requirements.txt | 4 + sdk/ai/azure-ai-project/setup.py | 71 + sdk/ai/azure-ai-project/tsp-location.yaml | 4 + 33 files changed, 24661 insertions(+) create mode 100644 sdk/ai/azure-ai-project/CHANGELOG.md create mode 100644 sdk/ai/azure-ai-project/LICENSE create mode 100644 sdk/ai/azure-ai-project/MANIFEST.in create mode 100644 sdk/ai/azure-ai-project/README.md create mode 100644 sdk/ai/azure-ai-project/azure/__init__.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/__init__.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/__init__.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_client.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_configuration.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_model_base.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_patch.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_serialization.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_types.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_vendor.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_version.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py create mode 100644 
sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_models.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py
 create mode 100644 sdk/ai/azure-ai-project/azure/ai/project/py.typed
 create mode 100644 sdk/ai/azure-ai-project/dev_requirements.txt
 create mode 100644 sdk/ai/azure-ai-project/setup.py
 create mode 100644 sdk/ai/azure-ai-project/tsp-location.yaml

diff --git a/sdk/ai/azure-ai-project/CHANGELOG.md b/sdk/ai/azure-ai-project/CHANGELOG.md
new file mode 100644
index 000000000000..628743d283a9
--- /dev/null
+++ b/sdk/ai/azure-ai-project/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Release History
+
+## 1.0.0b1 (1970-01-01)
+
+- Initial version
diff --git a/sdk/ai/azure-ai-project/LICENSE b/sdk/ai/azure-ai-project/LICENSE
new file mode 100644
index 000000000000..63447fd8bbbf
--- /dev/null
+++ b/sdk/ai/azure-ai-project/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) Microsoft Corporation.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/MANIFEST.in b/sdk/ai/azure-ai-project/MANIFEST.in
new file mode 100644
index 000000000000..1486c4804328
--- /dev/null
+++ b/sdk/ai/azure-ai-project/MANIFEST.in
@@ -0,0 +1,7 @@
+include *.md
+include LICENSE
+include azure/ai/project/py.typed
+recursive-include tests *.py
+recursive-include samples *.py *.md
+include azure/__init__.py
+include azure/ai/__init__.py
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/README.md b/sdk/ai/azure-ai-project/README.md
new file mode 100644
index 000000000000..1076c7ef0670
--- /dev/null
+++ b/sdk/ai/azure-ai-project/README.md
@@ -0,0 +1,80 @@
+
+
+# Azure AI Project client library for Python
+
+
+## Getting started
+
+### Install the package
+
+```bash
+python -m pip install azure-ai-project
+```
+
+#### Prerequisites
+
+- Python 3.8 or later is required to use this package.
+- You need an [Azure subscription][azure_sub] to use this package.
+- An existing Azure AI Project instance.
+
+#### Create with an Azure Active Directory Credential
+
+To use an [Azure Active Directory (AAD) token credential][authenticate_with_token],
+provide an instance of the desired credential type obtained from the
+[azure-identity][azure_identity_credentials] library.
+
+To authenticate with AAD, you must first [pip][pip] install the
+[`azure-identity`][azure_identity_pip] package.
+
+After setup, you can choose which type of [credential][azure_identity_credentials]
+from `azure.identity` to use. As an example, [DefaultAzureCredential][default_azure_credential]
+can be used to authenticate the client.
+
+Set the values of the client ID, tenant ID, and client secret of the AAD application as
+environment variables: `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`.
+
+Use the returned token credential to authenticate the client:
+
+```python
+>>> from azure.ai.project import AIProjectClient
+>>> from azure.identity import DefaultAzureCredential
+>>> client = AIProjectClient(
+...     endpoint='<endpoint>',
+...     subscription_id='<subscription-id>',
+...     resource_group_name='<resource-group>',
+...     project_name='<project-name>',
+...     credential=DefaultAzureCredential(),
+... )
+```
+
+## Examples
+
+```python
+>>> from azure.ai.project import AIProjectClient
+>>> from azure.identity import DefaultAzureCredential
+>>> from azure.core.exceptions import HttpResponseError
+
+>>> client = AIProjectClient(
+...     endpoint='<endpoint>',
+...     subscription_id='<subscription-id>',
+...     resource_group_name='<resource-group>',
+...     project_name='<project-name>',
+...     credential=DefaultAzureCredential(),
+... )
+>>> try:
+...     pass  # write test code here
+... except HttpResponseError as e:
+...     print('service responds error: {}'.format(e.response.json()))
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repos using our CLA.
+
+This project has adopted the
+[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
+see the Code of Conduct FAQ or contact opencode@microsoft.com with any
+additional questions or comments.
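+In addition to the examples above, note that `AIProjectClient` implements the
+context manager protocol, so its underlying pipeline can be closed
+deterministically. The snippet below is a minimal sketch: the placeholder
+values are illustrative assumptions and must be replaced with your project's
+details.
+
+```python
+>>> from azure.ai.project import AIProjectClient
+>>> from azure.identity import DefaultAzureCredential
+>>> with AIProjectClient(
+...     endpoint='<endpoint>',
+...     subscription_id='<subscription-id>',
+...     resource_group_name='<resource-group>',
+...     project_name='<project-name>',
+...     credential=DefaultAzureCredential(),
+... ) as client:
+...     pass  # use client.agents, client.connections, or client.evaluations here
+```
+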
+ + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/ai/azure-ai-project/azure/__init__.py b/sdk/ai/azure-ai-project/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/__init__.py b/sdk/ai/azure-ai-project/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/project/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/__init__.py new file mode 100644 index 000000000000..743119593f69 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import AIProjectClient # type: ignore +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AIProjectClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_client.py b/sdk/ai/azure-ai-project/azure/ai/project/_client.py new file mode 100644 index 000000000000..84a19d80cde9 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/_client.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, TYPE_CHECKING
+from typing_extensions import Self
+
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import HttpRequest, HttpResponse
+
+from ._configuration import AIProjectClientConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+
+
+class AIProjectClient:
+    """AIProjectClient.
+
+    :ivar agents: AgentsOperations operations
+    :vartype agents: azure.ai.project.operations.AgentsOperations
+    :ivar connections: ConnectionsOperations operations
+    :vartype connections: azure.ai.project.operations.ConnectionsOperations
+    :ivar evaluations: EvaluationsOperations operations
+    :vartype evaluations: azure.ai.project.operations.EvaluationsOperations
+    :param endpoint: The Azure AI Studio project endpoint, in the form
+     ``https://<azure-region>.api.azureml.ms`` or
+     ``https://<private-link-guid>.<azure-region>.api.azureml.ms``, where
+     :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+    :type endpoint: str
+    :param subscription_id: The Azure subscription ID. Required.
+    :type subscription_id: str
+    :param resource_group_name: The name of the Azure Resource Group. Required.
+    :type resource_group_name: str
+    :param project_name: The Azure AI Studio project name. Required.
+    :type project_name: str
+    :param credential: Credential used to authenticate requests to the service. Required.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2024-07-01-preview". Note that overriding this default value may result in unsupported
+     behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + self._config = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+        :rtype: ~azure.core.rest.HttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+
+        request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    def close(self) -> None:
+        self._client.close()
+
+    def __enter__(self) -> Self:
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details: Any) -> None:
+        self._client.__exit__(*exc_details)
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py b/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py
new file mode 100644
index 000000000000..f39a4dfc1f76
--- /dev/null
+++ b/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline import policies
+
+from ._version import VERSION
+
+if TYPE_CHECKING:
+    from azure.core.credentials import TokenCredential
+
+
+class AIProjectClientConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AIProjectClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param endpoint: The Azure AI Studio project endpoint, in the form
+     ``https://<azure-region>.api.azureml.ms`` or
+     ``https://<private-link-guid>.<azure-region>.api.azureml.ms``, where
+     :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+    :type endpoint: str
+    :param subscription_id: The Azure subscription ID. Required.
+    :type subscription_id: str
+    :param resource_group_name: The name of the Azure Resource Group. Required.
+    :type resource_group_name: str
+    :param project_name: The Azure AI Studio project name. Required.
+    :type project_name: str
+    :param credential: Credential used to authenticate requests to the service. Required.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2024-07-01-preview". Note that overriding this default value may result in unsupported
+     behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-project/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py b/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py new file mode 100644 index 000000000000..e6a2730f9276 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py @@ -0,0 +1,1159 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON 
encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. 
+ :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + 
def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + 
# label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
+ if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if 
deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + 
"prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py b/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py new file mode 100644 index 000000000000..ce17d1798ce7 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py @@ -0,0 +1,2114 @@ +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError +from azure.core.serialization import NULL as CoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + :return: The deserialized data. 
+ :rtype: object + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) from err + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError as err: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attemp(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attemp(data) + if success: + return json_result + # If i'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. + _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTF offset for UTC is 0. + + :param datetime.datetime dt: The datetime + :returns: The offset + :rtype: datetime.timedelta + """ + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation. + + :param datetime.datetime dt: The datetime + :returns: The timestamp representation + :rtype: str + """ + return "Z" + + def dst(self, dt): + """No daylight saving for UTC. 
+
+        :param datetime.datetime dt: The datetime
+        :returns: The daylight saving time
+        :rtype: datetime.timedelta
+        """
+        return datetime.timedelta(hours=1)
+
+
+try:
+    from datetime import timezone as _FixedOffset  # type: ignore
+except ImportError:  # Python 2.7
+
+    class _FixedOffset(datetime.tzinfo):  # type: ignore
+        """Fixed offset in minutes east from UTC.
+        Copy/pasted from Python doc
+        :param datetime.timedelta offset: offset in timedelta format
+        """
+
+        def __init__(self, offset) -> None:
+            self.__offset = offset
+
+        def utcoffset(self, dt):
+            return self.__offset
+
+        def tzname(self, dt):
+            return str(self.__offset.total_seconds() / 3600)
+
+        def __repr__(self):
+            return "<FixedOffset {}>".format(self.tzname(None))
+
+        def dst(self, dt):
+            return datetime.timedelta(0)
+
+        def __getinitargs__(self):
+            return (self.__offset,)
+
+
+try:
+    from datetime import timezone
+
+    TZ_UTC = timezone.utc
+except ImportError:
+    TZ_UTC = UTC()  # type: ignore
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+def attribute_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the Python attribute.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A key using attribute name
+    :rtype: str
+    """
+    return (key, value)
+
+
+def full_restapi_key_transformer(key, attr_desc, value):  # pylint: disable=unused-argument
+    """A key transformer that returns the full RestAPI key path.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: A list of keys using RestAPI syntax.
+    :rtype: list
+    """
+    keys = _FLATTEN.split(attr_desc["key"])
+    return ([_decode_attribute_map_key(k) for k in keys], value)
+
+
+def last_restapi_key_transformer(key, attr_desc, value):
+    """A key transformer that returns the last RestAPI key.
+
+    :param str key: The attribute name
+    :param dict attr_desc: The attribute metadata
+    :param object value: The value
+    :returns: The last RestAPI key.
+    :rtype: str
+    """
+    key, value = full_restapi_key_transformer(key, attr_desc, value)
+    return (key[-1], value)
+
+
+def _create_xml_node(tag, prefix=None, ns=None):
+    """Create a XML node.
+
+    :param str tag: The tag name
+    :param str prefix: The prefix
+    :param str ns: The namespace
+    :return: The XML node
+    :rtype: xml.etree.ElementTree.Element
+    """
+    if prefix and ns:
+        ET.register_namespace(prefix, ns)
+    if ns:
+        return ET.Element("{" + ns + "}" + tag)
+    return ET.Element(tag)
+
+
+class Model(object):
+    """Mixin for all client request body/response body models to support
+    serialization and deserialization.
+    """
+
+    _subtype_map: Dict[str, Dict[str, Any]] = {}
+    _attribute_map: Dict[str, Dict[str, Any]] = {}
+    _validation: Dict[str, Dict[str, Any]] = {}
+
+    def __init__(self, **kwargs: Any) -> None:
+        self.additional_properties: Optional[Dict[str, Any]] = {}
+        for k in kwargs:  # pylint: disable=consider-using-dict-items
+            if k not in self._attribute_map:
+                _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__)
+            elif k in self._validation and self._validation[k].get("readonly", False):
+                _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__)
+            else:
+                setattr(self, k, kwargs[k])
+
+    def __eq__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are equal
+        :rtype: bool
+        """
+        if isinstance(other, self.__class__):
+            return self.__dict__ == other.__dict__
+        return False
+
+    def __ne__(self, other: Any) -> bool:
+        """Compare objects by comparing all attributes.
+
+        :param object other: The object to compare
+        :returns: True if objects are not equal
+        :rtype: bool
+        """
+        return not self.__eq__(other)
+
+    def __str__(self) -> str:
+        return str(self.__dict__)
+
+    @classmethod
+    def enable_additional_properties_sending(cls) -> None:
+        cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"}
+
+    @classmethod
+    def is_xml_model(cls) -> bool:
+        try:
+            cls._xml_map  # type: ignore
+        except AttributeError:
+            return False
+        return True
+
+    @classmethod
+    def _create_xml_node(cls):
+        """Create XML node.
+
+        :returns: The XML node
+        :rtype: xml.etree.ElementTree.Element
+        """
+        try:
+            xml_map = cls._xml_map  # type: ignore
+        except AttributeError:
+            xml_map = {}
+
+        return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None))
+
+    def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON:
+        """Return the JSON that would be sent to server from this model.
+
+        This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`.
+
+        If you want XML serialization, you can pass the kwargs is_xml=True.
+
+        :param bool keep_readonly: If you want to serialize the readonly attributes
+        :returns: A dict JSON compatible object
+        :rtype: dict
+        """
+        serializer = Serializer(self._infer_class_models())
+        return serializer._serialize(  # type: ignore # pylint: disable=protected-access
+            self, keep_readonly=keep_readonly, **kwargs
+        )
+
+    def as_dict(
+        self,
+        keep_readonly: bool = True,
+        key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer,
+        **kwargs: Any
+    ) -> JSON:
+        """Return a dict that can be serialized using json.dump.
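+
+        For example (a minimal sketch; ``Dog`` is a hypothetical subclass of
+        this ``Model`` mixin):
+
+        .. code-block:: python
+
+            class Dog(Model):
+                _attribute_map = {"name": {"key": "name", "type": "str"}}
+
+                def __init__(self, **kwargs):
+                    super().__init__(**kwargs)
+                    self.name = kwargs.get("name")
+
+            Dog(name="Fido").as_dict()  # {'name': 'Fido'}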
+ + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + :rtype: ModelType + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. 
+ :returns: An instance of this model + :raises: DeserializationError if something went wrong + :rtype: ModelType + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
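+
+    For example:
+
+    .. code-block:: python
+
+        _decode_attribute_map_key("metadata\\.type")  # -> 'metadata.type'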
+ + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer(object): # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
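+
+        For example (a minimal sketch; ``Dog`` is a hypothetical ``Model``
+        subclass mapping a ``name`` attribute to a ``str``):
+
+        .. code-block:: python
+
+            serializer = Serializer({"Dog": Dog})
+            serializer._serialize(Dog(name="Fido"))  # {'name': 'Fido'}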
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
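+                            # e.g. a child tagged "{http://ns}Child" is re-tagged with "{http://ns}" + the attribute's XML name.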
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
+ :rtype: str, int, float, bool, dict, list + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is CoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + if data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise SerializationError(msg.format(data, data_type)) from err + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param obj data: Object to be serialized. + :param str data_type: Type of object in the iterable. + :rtype: str, int, float, bool + :return: serialized object + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec # pylint: disable=eval-used + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param str data: Object to be serialized. + :rtype: str + :return: serialized object + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + Defaults to False. 
+ :rtype: list, str + :return: serialized iterable + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized.append(None) + + if kwargs.get("do_quote", False): + serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :rtype: dict + :return: serialized dictionary + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError as err: + if isinstance(err, SerializationError): + raise + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
+ :rtype: dict or str + :return: serialized object + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + if obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError as exc: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) from exc + + @staticmethod + def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument + """Serialize bytearray into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument + """Serialize str into base-64 string. + + :param str attr: Object to be serialized. + :rtype: str + :return: serialized base64 + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Decimal object to float. + + :param decimal attr: Object to be serialized. + :rtype: float + :return: serialized decimal + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): # pylint: disable=unused-argument + """Serialize long (Py2) or int (Py3). + + :param int attr: Object to be serialized. + :rtype: int/long + :return: serialized long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + :return: serialized date + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. 
+ :rtype: str + :return: serialized time + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + :return: serialized duration + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. + :return: serialized rfc + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError as exc: + raise TypeError("RFC1123 object must be valid Datetime object.") from exc + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + :return: serialized iso + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise SerializationError(msg) from err + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise TypeError(msg) from err + + @staticmethod + def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises: SerializationError if format invalid + :return: serialied unix + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError as exc: + raise TypeError("Unix time object must be valid Datetime object.") from exc + + +def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + key = attr_desc["key"] + working_data = data + + while "." 
in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements + attr, attr_desc, data +): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + :param str attr: The attribute to extract + :param dict attr_desc: The attribute description + :param dict data: The data to extract from + :rtype: object + :returns: The extracted attribute + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. 
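+
+    For example (sketch; ``Blob`` is a hypothetical model class):
+
+    .. code-block:: python
+
+        class Blob(Model):
+            _xml_map = {"name": "Blob", "ns": "http://example.com/ns"}
+
+        _extract_name_from_internal_type(Blob)  # '{http://example.com/ns}Blob'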
+
+    :param dict internal_type: A model type
+    :rtype: str
+    :returns: The XML name, prefixed with its namespace if one is declared
+    """
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+    xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+    xml_ns = internal_type_xml_map.get("ns", None)
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+    return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data):  # pylint: disable=unused-argument,too-many-return-statements
+    if isinstance(data, dict):
+        return None
+
+    # Test if this model is XML ready first
+    if not isinstance(data, ET.Element):
+        return None
+
+    xml_desc = attr_desc.get("xml", {})
+    xml_name = xml_desc.get("name", attr_desc["key"])
+
+    # Look for children
+    is_iter_type = attr_desc["type"].startswith("[")
+    is_wrapped = xml_desc.get("wrapped", False)
+    internal_type = attr_desc.get("internalType", None)
+    internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+    # Integrate namespace if necessary
+    xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+    if xml_ns:
+        xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+    # If it's an attribute, that's simple
+    if xml_desc.get("attr", False):
+        return data.get(xml_name)
+
+    # If it's x-ms-text, that's simple too
+    if xml_desc.get("text", False):
+        return data.text
+
+    # Scenario where I take the local name:
+    # - Wrapped node
+    # - Internal type is an enum (considered basic types)
+    # - Internal type has no XML/Name node
+    if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+        children = data.findall(xml_name)
+    # If internal type has a local name and it's not a list, I use that name
+    elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+        xml_name = _extract_name_from_internal_type(internal_type)
+        children = data.findall(xml_name)
+    # That's an array
+    else:
+        if internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        # Iter and wrapped, should have found one node only (the wrap one)
+        if len(children) != 1:
+            raise DeserializationError(
+                "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(  # pylint: disable=line-too-long
+                    xml_name
+                )
+            )
+        return list(children[0])  # Might be empty list and that's ok.
+
+    # Not an itertype: exactly one element (or none) is expected here
+    if len(children) > 1:
+        raise DeserializationError("Found several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
+
+
+class Deserializer(object):
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
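+
+    For example (a minimal sketch; ``Dog`` is a hypothetical ``Model``
+    subclass and the string literal stands in for a raw response body):
+
+    .. code-block:: python
+
+        deserializer = Deserializer({"Dog": Dog})
+        dog = deserializer("Dog", '{"name": "Fido"}', content_type="application/json")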
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple + """ + if target is None: + return None, None + + if isinstance(target, str): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + :return: Deserialized object. + :rtype: object + """ + try: + return self(target_obj, data, content_type=content_type) + except: # pylint: disable=bare-except + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param obj raw_data: Data to be processed. + :param str content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + :rtype: object + :return: Unpacked content. + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param Response response: The response model class. + :param dict attrs: The deserialized response attributes. + :param dict additional_properties: Additional properties to be set. + :rtype: Response + :return: The instantiated response model. 
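+
+        For example (sketch; ``Dog`` is a hypothetical model whose ``id`` is
+        marked read-only in ``_validation``): the constructor receives only the
+        writable attributes, and ``id`` is then set directly on the instance:
+
+        .. code-block:: python
+
+            deserializer._instantiate_model(Dog, {"id": "123", "name": "Fido"})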
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access + ] + const = [ + k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. 
+ :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :return: Deserialized object. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, str): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :return: Deserialized basic type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + if isinstance(attr, str): + if attr.lower() in ["true", "1"]: + return True + if attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec # pylint: disable=eval-used + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :return: Deserialized string. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. 
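+
+        For example (sketch with a hypothetical enum):
+
+        .. code-block:: python
+
+            class Color(str, Enum):
+                RED = "red"
+
+            Deserializer.deserialize_enum("Red", Color)   # Color.RED (case-insensitive match)
+            Deserializer.deserialize_enum("blue", Color)  # 'blue', with a warning logged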
+
+        If the string is not a valid enum value it will be returned as-is
+        and a warning will be logged.
+
+        :param str data: Response string to be deserialized. If this value is
+         None or invalid it will be returned as-is.
+        :param Enum enum_obj: Enum object to deserialize to.
+        :return: Deserialized enum object.
+        :rtype: Enum
+        """
+        if isinstance(data, enum_obj) or data is None:
+            return data
+        if isinstance(data, Enum):
+            data = data.value
+        if isinstance(data, int):
+            # Workaround. We might consider removing it in the future.
+            try:
+                return list(enum_obj.__members__.values())[data]
+            except IndexError as exc:
+                error = "{!r} is not a valid index for enum {!r}"
+                raise DeserializationError(error.format(data, enum_obj)) from exc
+        try:
+            return enum_obj(str(data))
+        except ValueError:
+            for enum_value in enum_obj:
+                if enum_value.value.lower() == str(data).lower():
+                    return enum_value
+            # We don't fail anymore for unknown value, we deserialize as a string
+            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+            return Deserializer.deserialize_unicode(data)
+
+    @staticmethod
+    def deserialize_bytearray(attr):
+        """Deserialize string into bytearray.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytearray
+        :rtype: bytearray
+        :raises: TypeError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return bytearray(b64decode(attr))  # type: ignore
+
+    @staticmethod
+    def deserialize_base64(attr):
+        """Deserialize base64url encoded string into bytes.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized bytes
+        :rtype: bytes
+        :raises: TypeError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
+        attr = attr + padding  # type: ignore
+        encoded = attr.replace("-", "+").replace("_", "/")
+        return b64decode(encoded)
+
+    @staticmethod
+    def deserialize_decimal(attr):
+        """Deserialize string into Decimal object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized decimal
+        :raises: DeserializationError if string format invalid.
+        :rtype: decimal
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            return decimal.Decimal(str(attr))  # type: ignore
+        except decimal.DecimalException as err:
+            msg = "Invalid decimal {}".format(attr)
+            raise DeserializationError(msg) from err
+
+    @staticmethod
+    def deserialize_long(attr):
+        """Deserialize string into long (Py2) or int (Py3).
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized int
+        :rtype: long or int
+        :raises: ValueError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        return _long_type(attr)  # type: ignore
+
+    @staticmethod
+    def deserialize_duration(attr):
+        """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized duration
+        :rtype: TimeDelta
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            duration = isodate.parse_duration(attr)
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize duration object."
+            raise DeserializationError(msg) from err
+        return duration
+
+    @staticmethod
+    def deserialize_date(attr):
+        """Deserialize ISO-8601 formatted string into Date object.
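
A worked example of the base64url handling in deserialize_base64 above: the expression "=" * (3 - (len(attr) + 3) % 4) restores the stripped padding (a 7-character input needs one "="), and the -/_ replacements map the URL-safe alphabet back to standard base64:

from base64 import b64decode

attr = "aGVsbG8"                           # unpadded base64url of b"hello"
padding = "=" * (3 - (len(attr) + 3) % 4)  # (3 - 10 % 4) == 1 pad character
encoded = (attr + padding).replace("-", "+").replace("_", "/")
assert b64decode(encoded) == b"hello"
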
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized date
+        :rtype: Date
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+        # This must NOT use defaultmonth/defaultday. Using 0 ensures an incomplete date raises an exception.
+        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+    @staticmethod
+    def deserialize_time(attr):
+        """Deserialize ISO-8601 formatted string into time object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized time
+        :rtype: datetime.time
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
+            raise DeserializationError("Time must have only digits and separators. Received: %s" % attr)
+        return isodate.parse_time(attr)
+
+    @staticmethod
+    def deserialize_rfc(attr):
+        """Deserialize RFC-1123 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized RFC datetime
+        :rtype: Datetime
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
+            date_obj = datetime.datetime(
+                *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+            )
+            if not date_obj.tzinfo:
+                date_obj = date_obj.astimezone(tz=TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to rfc datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_iso(attr):
+        """Deserialize ISO-8601 formatted string into Datetime object.
+
+        :param str attr: response string to be deserialized.
+        :return: Deserialized ISO datetime
+        :rtype: Datetime
+        :raises: DeserializationError if string format invalid.
+        """
+        if isinstance(attr, ET.Element):
+            attr = attr.text
+        try:
+            attr = attr.upper()  # type: ignore
+            match = Deserializer.valid_date.match(attr)
+            if not match:
+                raise ValueError("Invalid datetime string: " + attr)
+
+            check_decimal = attr.split(".")
+            if len(check_decimal) > 1:
+                decimal_str = ""
+                for digit in check_decimal[1]:
+                    if digit.isdigit():
+                        decimal_str += digit
+                    else:
+                        break
+                if len(decimal_str) > 6:
+                    attr = attr.replace(decimal_str, decimal_str[0:6])
+
+            date_obj = isodate.parse_datetime(attr)
+            test_utc = date_obj.utctimetuple()
+            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+                raise OverflowError("Hit max or min date")
+        except (ValueError, OverflowError, AttributeError) as err:
+            msg = "Cannot deserialize datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
+
+    @staticmethod
+    def deserialize_unix(attr):
+        """Deserialize a unix timestamp (seconds since epoch) into a Datetime object.
+
+        :param int attr: Unix timestamp, in seconds, to be deserialized.
+        :return: Deserialized datetime
+        :rtype: Datetime
+        :raises: DeserializationError if format invalid
+        """
+        if isinstance(attr, ET.Element):
+            attr = int(attr.text)  # type: ignore
+        try:
+            attr = int(attr)
+            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+        except ValueError as err:
+            msg = "Cannot deserialize to unix datetime object."
+            raise DeserializationError(msg) from err
+        return date_obj
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_types.py b/sdk/ai/azure-ai-project/azure/ai/project/_types.py
new file mode 100644
index 000000000000..c438829bda41
--- /dev/null
+++ b/sdk/ai/azure-ai-project/azure/ai/project/_types.py
@@ -0,0 +1,17 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+    from . import models as _models
+AgentsApiResponseFormatOption = Union[
+    str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat"
+]
+MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"]
+AgentsApiToolChoiceOption = Union[str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"]
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py b/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py
new file mode 100644
index 000000000000..e6f010934827
--- /dev/null
+++ b/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py
@@ -0,0 +1,50 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
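
The aliases in _types.py are ordinary typing unions, so callers may pass a bare string, an enum member, or a model instance interchangeably. A minimal sketch with a stand-in enum (the real members live in azure.ai.project.models):

import enum
from typing import Union

class AgentsApiResponseFormatMode(str, enum.Enum):  # stand-in for the real model type
    AUTO = "auto"

AgentsApiResponseFormatOption = Union[str, AgentsApiResponseFormatMode]

fmt: AgentsApiResponseFormatOption = "auto"         # plain string form
fmt = AgentsApiResponseFormatMode.AUTO              # enum form, equally valid
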
+# -------------------------------------------------------------------------- + +import json +from typing import Any, Dict, IO, List, Mapping, Optional, Tuple, Union + +from ._model_base import Model, SdkJSONEncoder + + +# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` +FileContent = Union[str, bytes, IO[str], IO[bytes]] + +FileType = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], +] + + +def serialize_multipart_data_entry(data_entry: Any) -> Any: + if isinstance(data_entry, (list, tuple, dict, Model)): + return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) + return data_entry + + +def prepare_multipart_form_data( + body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] +) -> Tuple[List[FileType], Dict[str, Any]]: + files: List[FileType] = [] + data: Dict[str, Any] = {} + for multipart_field in multipart_fields: + multipart_entry = body.get(multipart_field) + if isinstance(multipart_entry, list): + files.extend([(multipart_field, e) for e in multipart_entry]) + elif multipart_entry: + files.append((multipart_field, multipart_entry)) + + for data_field in data_fields: + data_entry = body.get(data_field) + if data_entry: + data[data_field] = serialize_multipart_data_entry(data_entry) + + return files, data diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_version.py b/sdk/ai/azure-ai-project/azure/ai/project/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py new file mode 100644 index 000000000000..d5beb6bf7f83 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
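
A hedged usage sketch for prepare_multipart_form_data above, assuming the definitions in _vendor.py are in scope: multipart fields become (field, entry) file tuples, list entries are flattened to one tuple each, and data fields pass through serialize_multipart_data_entry, which JSON-encodes containers and models:

body = {"file": b"raw bytes", "purpose": "assistants", "tags": ["a", "b"]}
files, data = prepare_multipart_form_data(body, multipart_fields=["file"], data_fields=["purpose", "tags"])
# files == [("file", b"raw bytes")]
# data  == {"purpose": "assistants", "tags": '["a", "b"]'}
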
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._client import AIProjectClient  # type: ignore
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AIProjectClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py
new file mode 100644
index 000000000000..f0d74b0d7477
--- /dev/null
+++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py
@@ -0,0 +1,139 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, TYPE_CHECKING
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .._serialization import Deserializer, Serializer
+from ._configuration import AIProjectClientConfiguration
+from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AIProjectClient:
+    """AIProjectClient.
+
+    :ivar agents: AgentsOperations operations
+    :vartype agents: azure.ai.project.aio.operations.AgentsOperations
+    :ivar connections: ConnectionsOperations operations
+    :vartype connections: azure.ai.project.aio.operations.ConnectionsOperations
+    :ivar evaluations: EvaluationsOperations operations
+    :vartype evaluations: azure.ai.project.aio.operations.EvaluationsOperations
+    :param endpoint: The Azure AI Studio project endpoint, in the form
+     ``https://<azure-region>.api.azureml.ms`` or
+     ``https://<private-link-guid>.<azure-region>.api.azureml.ms``\\ , where
+     :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+    :type endpoint: str
+    :param subscription_id: The Azure subscription ID. Required.
+    :type subscription_id: str
+    :param resource_group_name: The name of the Azure Resource Group. Required.
+    :type resource_group_name: str
+    :param project_name: The Azure AI Studio project name. Required.
+    :type project_name: str
+    :param credential: Credential used to authenticate requests to the service. Required.
+    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2024-07-01-preview". Note that overriding this default value may result in unsupported
+     behavior.
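
For orientation, a hedged construction sketch for the async client documented above; the endpoint and the angle-bracket IDs are illustrative placeholders, and DefaultAzureCredential is just one possible AsyncTokenCredential:

import asyncio
from azure.ai.project.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential

async def main():
    async with DefaultAzureCredential() as credential:
        async with AIProjectClient(
            endpoint="https://westus.api.azureml.ms",  # illustrative endpoint
            subscription_id="<subscription-guid>",     # placeholders, not real values
            resource_group_name="<resource-group>",
            project_name="<project-name>",
            credential=credential,
        ) as client:
            agent = await client.agents.get_agent("<assistant-id>")

asyncio.run(main())
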
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + self._config = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+        :rtype: ~azure.core.rest.AsyncHttpResponse
+        """
+
+        request_copy = deepcopy(request)
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+
+        request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
+        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py
new file mode 100644
index 000000000000..0785b42c4c94
--- /dev/null
+++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline import policies
+
+from .._version import VERSION
+
+if TYPE_CHECKING:
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AIProjectClientConfiguration:  # pylint: disable=too-many-instance-attributes
+    """Configuration for AIProjectClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param endpoint: The Azure AI Studio project endpoint, in the form
+     ``https://<azure-region>.api.azureml.ms`` or
+     ``https://<private-link-guid>.<azure-region>.api.azureml.ms``\\ , where
+     :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+    :type endpoint: str
+    :param subscription_id: The Azure subscription ID. Required.
+    :type subscription_id: str
+    :param resource_group_name: The name of the Azure Resource Group. Required.
+    :type resource_group_name: str
+    :param project_name: The Azure AI Studio project name. Required.
+    :type project_name: str
+    :param credential: Credential used to authenticate requests to the service. Required.
+    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+    :keyword api_version: The API version to use for this operation. Default value is
+     "2024-07-01-preview". Note that overriding this default value may result in unsupported
+     behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-project/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. 
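
Since _configure above only constructs a default when no policy is supplied, any pipeline policy can be swapped in through kwargs at client construction; a hedged sketch, with illustrative values:

from azure.core.pipeline import policies

config_kwargs = {
    "retry_policy": policies.AsyncRetryPolicy(retry_total=3, retry_backoff_factor=1.0),
    "logging_policy": policies.NetworkTraceLoggingPolicy(logging_enable=True),
}
# client = AIProjectClient(..., **config_kwargs)  # other required arguments elided
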
+ + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py new file mode 100644 index 000000000000..35cf92df96bc --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AgentsOperations # type: ignore +from ._operations import ConnectionsOperations # type: ignore +from ._operations import EvaluationsOperations # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AgentsOperations", + "ConnectionsOperations", + "EvaluationsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py new file mode 100644 index 000000000000..6fb61ba111ef --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py @@ -0,0 +1,6049 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import _model_base, models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._vendor import FileType, prepare_multipart_form_data +from ...operations._operations import ( + build_agents_cancel_run_request, + build_agents_cancel_vector_store_file_batch_request, + build_agents_create_agent_request, + build_agents_create_message_request, + build_agents_create_run_request, + build_agents_create_thread_and_run_request, + build_agents_create_thread_request, + build_agents_create_vector_store_file_batch_request, + build_agents_create_vector_store_file_request, + build_agents_create_vector_store_request, + build_agents_delete_agent_request, + build_agents_delete_file_request, + build_agents_delete_thread_request, + build_agents_delete_vector_store_file_request, + build_agents_delete_vector_store_request, + build_agents_get_agent_request, + build_agents_get_file_content_request, + build_agents_get_file_request, + build_agents_get_message_request, + build_agents_get_run_request, + build_agents_get_run_step_request, + build_agents_get_thread_request, + build_agents_get_vector_store_file_batch_request, + build_agents_get_vector_store_file_request, + build_agents_get_vector_store_request, + build_agents_list_agents_request, + build_agents_list_files_request, + build_agents_list_messages_request, + build_agents_list_run_steps_request, + build_agents_list_runs_request, + build_agents_list_vector_store_file_batch_files_request, + build_agents_list_vector_store_files_request, + build_agents_list_vector_stores_request, + build_agents_modify_vector_store_request, + build_agents_submit_tool_outputs_to_run_request, + build_agents_update_agent_request, + build_agents_update_message_request, + build_agents_update_run_request, + build_agents_update_thread_request, + build_agents_upload_file_request, + build_connections_get_request, + build_connections_list_request, + build_connections_list_secrets_request, + build_evaluations_create_or_replace_schedule_request, + build_evaluations_create_request, + build_evaluations_delete_schedule_request, + build_evaluations_get_request, + build_evaluations_get_schedule_request, + build_evaluations_list_request, + build_evaluations_list_schedule_request, + build_evaluations_update_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from ... import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.aio.AIProjectClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. 
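
A hedged call sketch for the create_agent operation implemented above, run inside an async context where `client` is an AIProjectClient; the model deployment name and instructions are illustrative:

agent = await client.agents.create_agent(
    model="gpt-4o",                               # illustrative model deployment name
    name="math-tutor",
    instructions="You are a helpful math tutor.",
    temperature=0.2,
)
print(agent.id)
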
+ :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_agents_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
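
And a hedged paging sketch for list_agents shown earlier, assuming the OpenAI-style paging fields (`has_more`, `last_id`) on OpenAIPageableListOfAgent; the `after` cursor fetches each subsequent page:

page = await client.agents.list_agents(limit=20)
agents = list(page.data)
while page.has_more:
    page = await client.agents.list_agents(limit=20, after=page.last_id)
    agents.extend(page.data)
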
+ :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. 
The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_thread(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AgentThread:
+ """Creates a new thread. Threads contain messages and can be run by agents.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_thread(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ messages: Optional[List[_models.ThreadMessageOptions]] = None,
+ tool_resources: Optional[_models.ToolResources] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.AgentThread:
+ """Creates a new thread. Threads contain messages and can be run by agents.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword messages: The initial messages to associate with the new thread. Default value is
+ None.
+ :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions]
+ :keyword tool_resources: A set of resources that are made available to the agent's tools in
+ this thread. The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread.
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + _request = build_agents_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. 
Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_thread(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AgentThread:
+ """Modifies an existing thread.
+
+ :param thread_id: The ID of the thread to modify. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentThread. The AgentThread is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.AgentThread
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_thread(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_resources: Optional[_models.ToolResources] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.AgentThread:
+ """Modifies an existing thread.
+
+ :param thread_id: The ID of the thread to modify. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_resources: A set of resources that are made available to the agent's tools in
+ this thread. The resources are specific to the
+ type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+ the ``file_search`` tool requires
+ a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: AgentThread.
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert
+ messages from the agent into
+ the conversation. Known values are: "user" and "assistant". Required.
+ :paramtype role: str or ~azure.ai.project.models.MessageRole
+ :keyword content: The textual content of the initial message. Currently, robust input including
+ images and annotated text may only be provided via
+ a separate call to the create message API. Required.
+ :paramtype content: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword attachments: A list of files attached to the message, and the tools they should be
+ added to. Default value is None.
+ :paramtype attachments: list[~azure.ai.project.models.MessageAttachment]
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_message(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Creates a new message on a specified thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_message(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ role: Union[str, _models.MessageRole] = _Unset,
+ content: str = _Unset,
+ attachments: Optional[List[_models.MessageAttachment]] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Creates a new message on a specified thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword role: The role of the entity that is creating the message. Allowed values include:
+
+
+ * ``user``\\ : Indicates the message is sent by an actual user and should be used in most
+ cases to represent user-generated messages.
+ * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert
+ messages from the agent into
+ the conversation. Known values are: "user" and "assistant". Required.
+ :paramtype role: str or ~azure.ai.project.models.MessageRole
+ :keyword content: The textual content of the initial message. Currently, robust input including
+ images and annotated text may only be provided via
+ a separate call to the create message API. Required.
+ :paramtype content: str
+ :keyword attachments: A list of files attached to the message, and the tools they should be
+ added to. Default value is None.
+ :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of 
messages that exist on a thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: 
ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_message(
+ self,
+ thread_id: str,
+ message_id: str,
+ *,
+ content_type: str = "application/json",
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def update_message(
+ self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadMessage
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def update_message(
+ self,
+ thread_id: str,
+ message_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadMessage:
+ """Modifies an existing message on an existing thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param message_id: Identifier of the message. Required.
+ :type message_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadMessage.
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_run(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an agent thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_run(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ stream_parameter: Optional[bool] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new run for an agent thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str
+ :keyword model: The overridden model name that the agent should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the agent should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str
+ :keyword additional_messages: Adds additional messages to the thread before creating the run.
+ Default value is None.
+ :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
+ :keyword tools: The overridden list of enabled tools that the agent should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
+ :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the
+ Run as server-sent events,
+ terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default
+ value is None.
+ :paramtype stream_parameter: bool
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. 
+ :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. 
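+
+ .. admonition:: Example
+
+ A minimal polling sketch, not part of this patch: it assumes an ``agents`` operations
+ client like this one, existing ``thread_id``/``run_id`` values, and that the returned
+ ``ThreadRun`` exposes a ``status`` field with the agents API run states.
+
+ .. code-block:: python
+
+ import asyncio
+
+ async def wait_for_run(agents, thread_id: str, run_id: str):
+ # Re-fetch the run until it leaves its non-terminal states.
+ run = await agents.get_run(thread_id, run_id)
+ while run.status in ("queued", "in_progress"):
+ await asyncio.sleep(1)
+ run = await agents.get_run(thread_id, run_id)
+ return run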
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
:paramtype stream_parameter: bool
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+ _params = kwargs.pop("params", {}) or {}
+
+ content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+ cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if tool_outputs is _Unset:
+ raise TypeError("missing required argument: tool_outputs")
+ body = {"stream": stream_parameter, "tool_outputs": tool_outputs}
+ body = {k: v for k, v in body.items() if v is not None}
+ content_type = content_type or "application/json"
+ _content = None
+ if isinstance(body, (IOBase, bytes)):
+ _content = body
+ else:
+ _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+ _request = build_agents_submit_tool_outputs_to_run_request(
+ thread_id=thread_id,
+ run_id=run_id,
+ content_type=content_type,
+ api_version=self._config.api_version,
+ content=_content,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read()  # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.ThreadRun, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})  # type: ignore
+
+ return deserialized  # type: ignore
+
+ @distributed_trace_async
+ async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun:
+ """Cancels an in-progress run on a thread.
+
+ :param thread_id: Identifier of the thread. Required.
+ :type thread_id: str
+ :param run_id: Identifier of the run. Required.
+ :type run_id: str
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_thread_and_run(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Creates a new agent thread and immediately starts a run using that new thread.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only
+ the number of completion tokens specified, across multiple turns of the run. If the run
+ exceeds the number of completion tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :return: RunStep. 
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.project.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_agents_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :return: FileListResponse. 
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file( + self, + body: JSON = _Unset, + *, + file: FileType = _Unset, + purpose: Union[str, _models.FilePurpose] = _Unset, + filename: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Is one of the following types: JSON Required. + :type body: JSON + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file is _Unset: + raise TypeError("missing required argument: file") + if purpose is _Unset: + raise TypeError("missing required argument: purpose") + body = {"file": file, "filename": filename, "purpose": purpose} + body = {k: v for k, v in body.items() if v is not None} + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_agents_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = 
response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_file_request(
+ file_id=file_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ await response.read()  # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.OpenAIFile, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})  # type: ignore
+
+ return deserialized  # type: ignore
+
+ @distributed_trace_async
+ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
+ """Retrieves the raw content of a previously uploaded file.
+
+ :param file_id: The ID of the file to retrieve. Required.
+ :type file_id: str
+ :return: FileContentResponse.
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. 
+ :paramtype before: str + :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. 
Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
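+    # --- Editor's illustrative sketch (not generated code) ---
+    # Creating a store from previously uploaded files; `project_client`, the
+    # `.agents` attribute, and the file ID are hypothetical names for illustration.
+    #
+    #     vector_store = await project_client.agents.create_vector_store(
+    #         name="product-docs",
+    #         file_ids=["assistant-file-123"],
+    #         metadata={"owner": "search-team"},
+    #     )
+    #     print(vector_store.id)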
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def modify_vector_store( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Modifies an existing vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def modify_vector_store( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Modifies an existing vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def modify_vector_store( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Modifies an existing vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def modify_vector_store( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Modifies an existing vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
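+    # --- Editor's illustrative sketch (not generated code) ---
+    # Renaming the hypothetical store created earlier; only name, expires_after,
+    # and metadata are mutable here, per the signature above.
+    #
+    #     updated = await project_client.agents.modify_vector_store(
+    #         vector_store.id,
+    #         name="product-docs-v2",
+    #         metadata={"owner": "search-team", "stage": "beta"},
+    #     )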
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
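+    # --- Editor's illustrative sketch (not generated code) ---
+    # Deleting the hypothetical store; the `deleted` flag on the returned
+    # deletion-status model is assumed from its OpenAI-style shape.
+    #
+    #     status = await project_client.agents.delete_vector_store(vector_store.id)
+    #     assert status.deleted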
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
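+    # --- Editor's illustrative sketch (not generated code) ---
+    # Listing only the files that failed ingestion, using one of the `filter`
+    # values documented above; the `data` field on the pageable list is assumed.
+    #
+    #     failed = await project_client.agents.list_vector_store_files(
+    #         vector_store.id, filter="failed"
+    #     )
+    #     for vs_file in failed.data:
+    #         print(vs_file.id)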
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, + vector_store_id: str, + *, + file_id: str, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: str = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
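+    # --- Editor's illustrative sketch (not generated code) ---
+    # Attaching one previously uploaded file to the hypothetical store; the
+    # file ID is illustrative only.
+    #
+    #     vs_file = await project_client.agents.create_vector_store_file(
+    #         vector_store.id, file_id="assistant-file-123"
+    #     )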
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
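+    # --- Editor's illustrative sketch (not generated code) ---
+    # Detaching the file from the store. As the docstring notes, this does not
+    # delete the underlying file; that requires the separate file-deletion
+    # operation on this client (not shown in this hunk).
+    #
+    #     await project_client.agents.delete_vector_store_file(
+    #         vector_store.id, vs_file.id
+    #     )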
+ :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
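+    # --- Editor's illustrative sketch (not generated code) ---
+    # Ingesting several files in one batch rather than one create_vector_store_file
+    # call per file; the file IDs are hypothetical.
+    #
+    #     batch = await project_client.agents.create_vector_store_file_batch(
+    #         vector_store.id, file_ids=["assistant-file-123", "assistant-file-456"]
+    #     )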
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
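+    # --- Editor's illustrative sketch (not generated code) ---
+    # Checking on the hypothetical batch and cancelling it while it is still
+    # running; the `status` property and its "in_progress" value are assumed
+    # from the file-batch model.
+    #
+    #     batch = await project_client.agents.get_vector_store_file_batch(
+    #         vector_store.id, batch.id
+    #     )
+    #     if batch.status == "in_progress":
+    #         batch = await project_client.agents.cancel_vector_store_file_batch(
+    #             vector_store.id, batch.id
+    #         )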
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.aio.AIProjectClient`'s + :attr:`connections` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def _list( + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.project.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str + :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def _get(self, connection_name: str, **kwargs: Any) -> 
_models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def _list_secrets( + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + async def _list_secrets( + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + async def _list_secrets( + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + + @distributed_trace_async + async def _list_secrets( + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credentials (if available). + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword ignored: The body is ignored. 
Required. + :paramtype ignored: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + if body is _Unset: + if ignored is _Unset: + raise TypeError("missing required argument: ignored") + body = {"ignored": ignored} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_connections_list_secrets_request( + connection_name=connection_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class EvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.aio.AIProjectClient`'s + :attr:`evaluations` attribute.
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: ~azure.ai.project.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type evaluation: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(evaluation, (IOBase, bytes)): + _content = evaluation + else: + _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.Evaluation"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.project.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = 
pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def update( + self, + id: str, + resource: _models.Evaluation, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.project.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update( + self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update( + self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type resource: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_update_request( + id=id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_or_replace_schedule( + self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.project.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_replace_schedule( + self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. 
+ :type resource: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_replace_schedule( + self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_replace_schedule( + self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.project.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_or_replace_schedule_request( + name=name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedule( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.EvaluationSchedule"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. 
+ :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.project.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_schedule(self, name: str, **kwargs: Any) -> None: + """Resource delete operation template. 
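+
+        A minimal call sketch (an illustration, not generated code): ``project_client``
+        stands in for an ``AIProjectClient`` instance and the schedule name is a
+        placeholder. It pairs ``get_schedule`` with ``delete_schedule`` from this class.
+
+        .. code-block:: python
+
+            # Hypothetical: fetch a schedule by name, then delete it by the same name.
+            schedule = await project_client.evaluations.get_schedule("my-eval-schedule")
+            await project_client.evaluations.delete_schedule("my-eval-schedule")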
+ + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_evaluations_delete_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py new file mode 100644 index 000000000000..f6ed04e4637b --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py @@ -0,0 +1,376 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + Agent, + AgentDeletionStatus, + AgentThread, + AgentThreadCreationOptions, + AgentsApiResponseFormat, + AgentsNamedToolChoice, + ApplicationInsightsConfiguration, + AzureAISearchResource, + AzureAISearchToolDefinition, + BingGroundingToolDefinition, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + ConnectionListResource, + ConnectionResource, + CronTrigger, + Dataset, + Evaluation, + EvaluationSchedule, + EvaluatorConfiguration, + FileContentResponse, + FileDeletionStatus, + FileListResponse, + FileSearchToolDefinition, + FileSearchToolDefinitionDetails, + FileSearchToolResource, + FunctionDefinition, + FunctionName, + FunctionToolDefinition, + IndexResource, + InputData, + MessageAttachment, + MessageContent, + MessageDelta, + MessageDeltaChunk, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextAnnotation, + MessageDeltaTextContent, + MessageDeltaTextContentObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFilePathAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageImageFileContent, + MessageImageFileDetails, + MessageIncompleteDetails, + MessageTextAnnotation, + MessageTextContent, + MessageTextDetails, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + MicrosoftFabricToolDefinition, + OpenAIFile, + OpenAIPageableListOfAgent, + OpenAIPageableListOfRunStep, + OpenAIPageableListOfThreadMessage, + OpenAIPageableListOfThreadRun, + OpenAIPageableListOfVectorStore, + OpenAIPageableListOfVectorStoreFile, + RecurrenceSchedule, + RecurrenceTrigger, + RequiredAction, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + RunCompletionUsage, + RunError, + RunStep, + RunStepAzureAISearchToolCall, + RunStepBingGroundingToolCall, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCompletionUsage, + RunStepDelta, + RunStepDeltaChunk, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaDetail, + RunStepDeltaFileSearchToolCall, + RunStepDeltaFunction, + RunStepDeltaFunctionToolCall, + RunStepDeltaMessageCreation, + RunStepDeltaMessageCreationObject, + RunStepDeltaToolCall, + RunStepDeltaToolCallObject, + RunStepDetails, + RunStepError, + RunStepFileSearchToolCall, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepMicrosoftFabricToolCall, + RunStepSharepointToolCall, + RunStepToolCall, + RunStepToolCallDetails, + SamplingStrategy, + SharepointToolDefinition, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + SystemData, + ThreadDeletionStatus, + ThreadMessage, + ThreadMessageOptions, + 
ThreadRun, + ToolDefinition, + ToolOutput, + ToolResources, + Trigger, + TruncationObject, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + UpdateToolResourcesOptions, + VectorStore, + VectorStoreAutoChunkingStrategyRequest, + VectorStoreAutoChunkingStrategyResponse, + VectorStoreChunkingStrategyRequest, + VectorStoreChunkingStrategyResponse, + VectorStoreDeletionStatus, + VectorStoreExpirationPolicy, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileCount, + VectorStoreFileDeletionStatus, + VectorStoreFileError, + VectorStoreStaticChunkingStrategyOptions, + VectorStoreStaticChunkingStrategyRequest, + VectorStoreStaticChunkingStrategyResponse, +) + +from ._enums import ( # type: ignore + AgentStreamEvent, + AgentsApiResponseFormatMode, + AgentsApiToolChoiceOptionMode, + AgentsNamedToolChoiceType, + ApiResponseFormat, + AuthenticationType, + ConnectionType, + DoneEvent, + ErrorEvent, + FilePurpose, + FileState, + Frequency, + IncompleteRunDetails, + ListSortOrder, + MessageIncompleteDetailsReason, + MessageRole, + MessageStatus, + MessageStreamEvent, + RunStatus, + RunStepErrorCode, + RunStepStatus, + RunStepStreamEvent, + RunStepType, + RunStreamEvent, + ThreadStreamEvent, + TruncationStrategy, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, + VectorStoreExpirationPolicyAnchor, + VectorStoreFileBatchStatus, + VectorStoreFileErrorCode, + VectorStoreFileStatus, + VectorStoreFileStatusFilter, + VectorStoreStatus, + WeekDays, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Agent", + "AgentDeletionStatus", + "AgentThread", + "AgentThreadCreationOptions", + "AgentsApiResponseFormat", + "AgentsNamedToolChoice", + "ApplicationInsightsConfiguration", + "AzureAISearchResource", + "AzureAISearchToolDefinition", + "BingGroundingToolDefinition", + "CodeInterpreterToolDefinition", + "CodeInterpreterToolResource", + "ConnectionListResource", + "ConnectionResource", + "CronTrigger", + "Dataset", + "Evaluation", + "EvaluationSchedule", + "EvaluatorConfiguration", + "FileContentResponse", + "FileDeletionStatus", + "FileListResponse", + "FileSearchToolDefinition", + "FileSearchToolDefinitionDetails", + "FileSearchToolResource", + "FunctionDefinition", + "FunctionName", + "FunctionToolDefinition", + "IndexResource", + "InputData", + "MessageAttachment", + "MessageContent", + "MessageDelta", + "MessageDeltaChunk", + "MessageDeltaContent", + "MessageDeltaImageFileContent", + "MessageDeltaImageFileContentObject", + "MessageDeltaTextAnnotation", + "MessageDeltaTextContent", + "MessageDeltaTextContentObject", + "MessageDeltaTextFileCitationAnnotation", + "MessageDeltaTextFileCitationAnnotationObject", + "MessageDeltaTextFilePathAnnotation", + "MessageDeltaTextFilePathAnnotationObject", + "MessageImageFileContent", + "MessageImageFileDetails", + "MessageIncompleteDetails", + "MessageTextAnnotation", + "MessageTextContent", + "MessageTextDetails", + "MessageTextFileCitationAnnotation", + "MessageTextFileCitationDetails", + "MessageTextFilePathAnnotation", + "MessageTextFilePathDetails", + "MicrosoftFabricToolDefinition", + "OpenAIFile", + "OpenAIPageableListOfAgent", + "OpenAIPageableListOfRunStep", + "OpenAIPageableListOfThreadMessage", + "OpenAIPageableListOfThreadRun", + "OpenAIPageableListOfVectorStore", + "OpenAIPageableListOfVectorStoreFile", + "RecurrenceSchedule", + "RecurrenceTrigger", + "RequiredAction", + "RequiredFunctionToolCall", + 
"RequiredFunctionToolCallDetails", + "RequiredToolCall", + "RunCompletionUsage", + "RunError", + "RunStep", + "RunStepAzureAISearchToolCall", + "RunStepBingGroundingToolCall", + "RunStepCodeInterpreterImageOutput", + "RunStepCodeInterpreterImageReference", + "RunStepCodeInterpreterLogOutput", + "RunStepCodeInterpreterToolCall", + "RunStepCodeInterpreterToolCallDetails", + "RunStepCodeInterpreterToolCallOutput", + "RunStepCompletionUsage", + "RunStepDelta", + "RunStepDeltaChunk", + "RunStepDeltaCodeInterpreterDetailItemObject", + "RunStepDeltaCodeInterpreterImageOutput", + "RunStepDeltaCodeInterpreterImageOutputObject", + "RunStepDeltaCodeInterpreterLogOutput", + "RunStepDeltaCodeInterpreterOutput", + "RunStepDeltaCodeInterpreterToolCall", + "RunStepDeltaDetail", + "RunStepDeltaFileSearchToolCall", + "RunStepDeltaFunction", + "RunStepDeltaFunctionToolCall", + "RunStepDeltaMessageCreation", + "RunStepDeltaMessageCreationObject", + "RunStepDeltaToolCall", + "RunStepDeltaToolCallObject", + "RunStepDetails", + "RunStepError", + "RunStepFileSearchToolCall", + "RunStepFunctionToolCall", + "RunStepFunctionToolCallDetails", + "RunStepMessageCreationDetails", + "RunStepMessageCreationReference", + "RunStepMicrosoftFabricToolCall", + "RunStepSharepointToolCall", + "RunStepToolCall", + "RunStepToolCallDetails", + "SamplingStrategy", + "SharepointToolDefinition", + "SubmitToolOutputsAction", + "SubmitToolOutputsDetails", + "SystemData", + "ThreadDeletionStatus", + "ThreadMessage", + "ThreadMessageOptions", + "ThreadRun", + "ToolDefinition", + "ToolOutput", + "ToolResources", + "Trigger", + "TruncationObject", + "UpdateCodeInterpreterToolResourceOptions", + "UpdateFileSearchToolResourceOptions", + "UpdateToolResourcesOptions", + "VectorStore", + "VectorStoreAutoChunkingStrategyRequest", + "VectorStoreAutoChunkingStrategyResponse", + "VectorStoreChunkingStrategyRequest", + "VectorStoreChunkingStrategyResponse", + "VectorStoreDeletionStatus", + "VectorStoreExpirationPolicy", + "VectorStoreFile", + "VectorStoreFileBatch", + "VectorStoreFileCount", + "VectorStoreFileDeletionStatus", + "VectorStoreFileError", + "VectorStoreStaticChunkingStrategyOptions", + "VectorStoreStaticChunkingStrategyRequest", + "VectorStoreStaticChunkingStrategyResponse", + "AgentStreamEvent", + "AgentsApiResponseFormatMode", + "AgentsApiToolChoiceOptionMode", + "AgentsNamedToolChoiceType", + "ApiResponseFormat", + "AuthenticationType", + "ConnectionType", + "DoneEvent", + "ErrorEvent", + "FilePurpose", + "FileState", + "Frequency", + "IncompleteRunDetails", + "ListSortOrder", + "MessageIncompleteDetailsReason", + "MessageRole", + "MessageStatus", + "MessageStreamEvent", + "RunStatus", + "RunStepErrorCode", + "RunStepStatus", + "RunStepStreamEvent", + "RunStepType", + "RunStreamEvent", + "ThreadStreamEvent", + "TruncationStrategy", + "VectorStoreChunkingStrategyRequestType", + "VectorStoreChunkingStrategyResponseType", + "VectorStoreExpirationPolicyAnchor", + "VectorStoreFileBatchStatus", + "VectorStoreFileErrorCode", + "VectorStoreFileStatus", + "VectorStoreFileStatusFilter", + "VectorStoreStatus", + "WeekDays", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py new file mode 100644 index 000000000000..7ca731b7639b --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py @@ -0,0 +1,513 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents the mode in which the model will handle the return format of a tool call.""" + + AUTO = "auto" + """Default value. Let the model handle the return format.""" + NONE = "none" + """Setting the value to ``none``\\ , will result in a 400 Bad request.""" + + +class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies how the tool choice will be used.""" + + NONE = "none" + """The model will not call a function and instead generates a message.""" + AUTO = "auto" + """The model can pick between generating a message or calling a function.""" + + +class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available tool types for agents named tools.""" + + FUNCTION = "function" + """Tool type ``function``""" + CODE_INTERPRETER = "code_interpreter" + """Tool type ``code_interpreter``""" + FILE_SEARCH = "file_search" + """Tool type ``file_search``""" + BING_GROUNDING = "bing_grounding" + """Tool type ``bing_grounding``""" + MICROSOFT_FABRIC = "microsoft_fabric" + """Tool type ``microsoft_fabric``""" + SHAREPOINT = "sharepoint" + """Tool type ``sharepoint``""" + AZURE_AI_SEARCH = "azure_ai_search" + """Tool type ``azure_ai_search``""" + + +class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Each event in a server-sent events stream has an ``event`` and ``data`` property: + + .. code-block:: + + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run + is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses + to create a message during a run, we emit a ``thread.message.created event``\\ , a + ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a + ``thread.message.completed`` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. + """ + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. 
The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + DONE = "done" + """Event sent when the stream is done.""" + + +class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible API response formats.""" + + TEXT = "text" + """``text`` format should be used for requests involving any sort of ToolCall.""" + JSON_OBJECT = "json_object" + """Using ``json_object`` format will limit the usage of ToolCall to only functions.""" + + +class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Authentication type used by Azure AI service to connect to another service.""" + + API_KEY = "ApiKey" + """API Key authentication""" + AAD = "AAD" + """Entra ID authentication""" + SAS = "SAS" + """Shared Access Signature (SAS) authentication""" + + +class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Type (or category) of the connection.""" + + AZURE_OPEN_AI = "AzureOpenAI" + """Azure OpenAI service""" + SERVERLESS = "Serverless" + """Serverless API service""" + AZURE_BLOB_STORAGE = "AzureBlob" + """Azure Blob Storage""" + AI_SERVICES = "AIServices" + """Azure AI Services""" + + +class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating the successful end of a stream.""" + + DONE = "done" + """Event sent when the stream is done.""" + + +class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating a server side error while streaming.""" + + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + + +class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values denoting the intended usage of a file.""" + + FINE_TUNE = "fine-tune" + """Indicates a file is used for fine tuning input.""" + FINE_TUNE_RESULTS = "fine-tune-results" + """Indicates a file is used for fine tuning results.""" + AGENTS = "assistants" + """Indicates a file is used as input to agents.""" + AGENTS_OUTPUT = "assistants_output" + """Indicates a file is used as output by agents.""" + BATCH = "batch" + """Indicates a file is used as input to .""" + BATCH_OUTPUT = "batch_output" + """Indicates a file is used as output by a vector store batch operation.""" + VISION = "vision" + """Indicates a file is used as input to a vision operation.""" + + +class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The state of the file.""" + + UPLOADED = "uploaded" + """The file has been uploaded but it's not yet processed. This state is not returned by Azure + OpenAI and exposed only for + compatibility. It can be categorized as an inactive state.""" + PENDING = "pending" + """The operation was created and is not queued to be processed in the future. It can be + categorized as an inactive state.""" + RUNNING = "running" + """The operation has started to be processed. It can be categorized as an active state.""" + PROCESSED = "processed" + """The operation has successfully processed and is ready for consumption. It can be categorized as + a terminal state.""" + ERROR = "error" + """The operation has completed processing with a failure and cannot be further consumed. It can be + categorized as a terminal state.""" + DELETING = "deleting" + """The entity is in the process to be deleted. This state is not returned by Azure OpenAI and + exposed only for compatibility. 
+ It can be categorized as an active state.""" + DELETED = "deleted" + """The entity has been deleted but may still be referenced by other entities predating the + deletion. It can be categorized as a + terminal state.""" + + +class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Frequency of the schedule - day, week, month, hour, minute.""" + + MONTH = "Month" + WEEK = "Week" + DAY = "Day" + HOUR = "Hour" + MINUTE = "Minute" + + +class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The reason why the run is incomplete. This will point to which specific token limit was reached + over the course of the run. + """ + + MAX_COMPLETION_TOKENS = "max_completion_tokens" + """Maximum completion tokens exceeded""" + MAX_PROMPT_TOKENS = "max_prompt_tokens" + """Maximum prompt tokens exceeded""" + + +class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The available sorting options when requesting a list of response objects.""" + + ASCENDING = "asc" + """Specifies an ascending sort order.""" + DESCENDING = "desc" + """Specifies a descending sort order.""" + + +class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A set of reasons describing why a message is marked as incomplete.""" + + CONTENT_FILTER = "content_filter" + """The run generating the message was terminated due to content filter flagging.""" + MAX_TOKENS = "max_tokens" + """The run generating the message exhausted available tokens before completion.""" + RUN_CANCELLED = "run_cancelled" + """The run generating the message was cancelled before completion.""" + RUN_FAILED = "run_failed" + """The run generating the message failed.""" + RUN_EXPIRED = "run_expired" + """The run generating the message expired.""" + + +class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values for roles attributed to messages in a thread.""" + + USER = "user" + """The role representing the end-user.""" + AGENT = "assistant" + """The role representing the agent.""" + + +class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible execution status values for a thread message.""" + + IN_PROGRESS = "in_progress" + """A run is currently creating this message.""" + INCOMPLETE = "incomplete" + """This message is incomplete. See incomplete_details for more information.""" + COMPLETED = "completed" + """This message was successfully completed by a run.""" + + +class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Message operation related streaming events.""" + + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" + + +class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of an agent thread run.""" + + QUEUED = "queued" + """Represents a run that is queued to start.""" + IN_PROGRESS = "in_progress" + """Represents a run that is in progress.""" + REQUIRES_ACTION = "requires_action" + """Represents a run that needs another operation, such as tool output submission, to continue.""" + CANCELLING = "cancelling" + """Represents a run that is in the process of cancellation.""" + CANCELLED = "cancelled" + """Represents a run that has been cancelled.""" + FAILED = "failed" + """Represents a run that failed.""" + COMPLETED = "completed" + """Represents a run that successfully completed.""" + EXPIRED = "expired" + """Represents a run that expired before it could otherwise finish.""" + + +class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible error code values attributable to a failed run step.""" + + SERVER_ERROR = "server_error" + """Represents a server error.""" + RATE_LIMIT_EXCEEDED = "rate_limit_exceeded" + """Represents an error indicating configured rate limits were exceeded.""" + + +class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of a run step.""" + + IN_PROGRESS = "in_progress" + """Represents a run step still in progress.""" + CANCELLED = "cancelled" + """Represents a run step that was cancelled.""" + FAILED = "failed" + """Represents a run step that failed.""" + COMPLETED = "completed" + """Represents a run step that successfully completed.""" + EXPIRED = "expired" + """Represents a run step that expired before otherwise finishing.""" + + +class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run step operation related streaming events.""" + + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + + +class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible types of run steps.""" + + MESSAGE_CREATION = "message_creation" + """Represents a run step to create a message.""" + TOOL_CALLS = "tool_calls" + """Represents a run step that calls tools.""" + + +class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run operation related streaming events.""" + + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. 
The data of this event is of type ThreadRun"""
+ THREAD_RUN_QUEUED = "thread.run.queued"
+ """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun"""
+ THREAD_RUN_IN_PROGRESS = "thread.run.in_progress"
+ """Event sent when a run moves to ``in_progress`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action"
+ """Event sent when a run moves to ``requires_action`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_COMPLETED = "thread.run.completed"
+ """Event sent when a run is completed. The data of this event is of type ThreadRun"""
+ THREAD_RUN_FAILED = "thread.run.failed"
+ """Event sent when a run fails. The data of this event is of type ThreadRun"""
+ THREAD_RUN_CANCELLING = "thread.run.cancelling"
+ """Event sent when a run moves to ``cancelling`` status. The data of this event is of type
+ ThreadRun"""
+ THREAD_RUN_CANCELLED = "thread.run.cancelled"
+ """Event sent when a run is cancelled. The data of this event is of type ThreadRun"""
+ THREAD_RUN_EXPIRED = "thread.run.expired"
+ """Event sent when a run is expired. The data of this event is of type ThreadRun"""
+
+
+class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Thread operation related streaming events."""
+
+ THREAD_CREATED = "thread.created"
+ """Event sent when a new thread is created. The data of this event is of type AgentThread"""
+
+
+class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Possible truncation strategies for the thread."""
+
+ AUTO = "auto"
+ """Default value. Messages in the middle of the thread will be dropped to fit the context length
+ of the model."""
+ LAST_MESSAGES = "last_messages"
+ """The thread will truncate to the ``lastMessages`` count of recent messages."""
+
+
+class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of chunking strategy."""
+
+ AUTO = "auto"
+ STATIC = "static"
+
+
+class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Type of chunking strategy."""
+
+ OTHER = "other"
+ STATIC = "static"
+
+
+class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Describes the relationship between the days and the expiration of this vector store."""
+
+ LAST_ACTIVE_AT = "last_active_at"
+ """The expiration policy is based on the last time the vector store was active."""
+
+
+class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """The status of the vector store file batch."""
+
+ IN_PROGRESS = "in_progress"
+ """The vector store is still processing this file batch."""
+ COMPLETED = "completed"
+ """The vector store file batch is ready for use."""
+ CANCELLED = "cancelled"
+ """The vector store file batch was cancelled."""
+ FAILED = "failed"
+ """The vector store file batch failed to process."""
+
+
+class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Error code variants for vector store file processing."""
+
+ INTERNAL_ERROR = "internal_error"
+ """An internal error occurred."""
+ FILE_NOT_FOUND = "file_not_found"
+ """The file was not found."""
+ PARSING_ERROR = "parsing_error"
+ """The file could not be parsed."""
+ UNHANDLED_MIME_TYPE = "unhandled_mime_type"
+ """The file has an unhandled mime type."""
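
These ingestion status enums are what a caller polls while a vector store processes files; ``VectorStoreFileStatus``, defined immediately below, plays the same role per file. A minimal polling sketch, assuming a ``get_vector_store_file`` operation exists on the client (no such operation appears in this hunk):

    import time

    from azure.ai.project.models import VectorStoreFileStatus

    def wait_for_file(client, vector_store_id: str, file_id: str, interval: float = 2.0):
        # Poll until the file leaves "in_progress"; CaseInsensitiveEnumMeta makes the
        # string comparison tolerant of casing. `get_vector_store_file` is an assumed
        # operation name, used here only for illustration.
        while True:
            vs_file = client.agents.get_vector_store_file(
                vector_store_id=vector_store_id, file_id=file_id
            )
            if vs_file.status != VectorStoreFileStatus.IN_PROGRESS:
                return vs_file
            time.sleep(interval)

+
+
+class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+ """Vector store file status."""
+
+ IN_PROGRESS = 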
"in_progress" + """The file is currently being processed.""" + COMPLETED = "completed" + """The file has been successfully processed.""" + FAILED = "failed" + """The file has failed to process.""" + CANCELLED = "cancelled" + """The file was cancelled.""" + + +class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Query parameter filter for vector store file retrieval endpoint.""" + + IN_PROGRESS = "in_progress" + """Retrieve only files that are currently being processed""" + COMPLETED = "completed" + """Retrieve only files that have been successfully processed""" + FAILED = "failed" + """Retrieve only files that have failed to process""" + CANCELLED = "cancelled" + """Retrieve only files that were cancelled""" + + +class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Vector store possible status.""" + + EXPIRED = "expired" + """expired status indicates that this vector store has expired and is no longer available for use.""" + IN_PROGRESS = "in_progress" + """in_progress status indicates that this vector store is still processing files.""" + COMPLETED = "completed" + """completed status indicates that this vector store is ready for use.""" + + +class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """WeekDay of the schedule - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.""" + + MONDAY = "Monday" + TUESDAY = "Tuesday" + WEDNESDAY = "Wednesday" + THURSDAY = "Thursday" + FRIDAY = "Friday" + SATURDAY = "Saturday" + SUNDAY = "Sunday" diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py new file mode 100644 index 000000000000..455ce2ffd760 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py @@ -0,0 +1,6105 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .. import _model_base +from .._model_base import rest_discriminator, rest_field +from ._enums import ( + AuthenticationType, + RunStepType, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, +) + +if TYPE_CHECKING: + from .. import _types, models as _models + + +class Agent(_model_base.Model): + """Represents an agent that can call the model and use tools. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always assistant. Required. Default value is + "assistant". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the agent. Required. + :vartype name: str + :ivar description: The description of the agent. Required. 
+ :vartype description: str + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The system instructions for the agent to use. Required. + :vartype instructions: str + :ivar tools: The collection of tools enabled for the agent. Required. + :vartype tools: list[~azure.ai.project.models.ToolDefinition] + :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are + specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required. + :vartype tool_resources: ~azure.ai.project.models.ToolResources + :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Required. + :vartype temperature: float + :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required. + :vartype top_p: float + :ivar response_format: The response format of the tool calls used by this agent. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat + :vartype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode or + ~azure.ai.project.models.AgentsApiResponseFormat + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["assistant"] = rest_field() + """The object type, which is always assistant. Required. Default value is \"assistant\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + name: str = rest_field() + """The name of the agent. Required.""" + description: str = rest_field() + """The description of the agent. Required.""" + model: str = rest_field() + """The ID of the model to use. Required.""" + instructions: str = rest_field() + """The system instructions for the agent to use. Required.""" + tools: List["_models.ToolDefinition"] = rest_field() + """The collection of tools enabled for the agent. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are used by the agent's tools. The resources are specific to the type + of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required.""" + temperature: float = rest_field() + """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, + while lower values like 0.2 will make it more focused and deterministic. 
Required.""" + top_p: float = rest_field() + """An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required.""" + response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() + """The response format of the tool calls used by this agent. Is one of the following types: str, + Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + description: str, + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + tool_resources: "_models.ToolResources", + temperature: float, + top_p: float, + metadata: Dict[str, str], + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant"] = "assistant" + + +class AgentDeletionStatus(_model_base.Model): + """The status of an agent deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is + "assistant.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["assistant.deleted"] = rest_field() + """The object type, which is always 'assistant.deleted'. Required. Default value is + \"assistant.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant.deleted"] = "assistant.deleted" + + +class AgentsApiResponseFormat(_model_base.Model): + """An object describing the expected output of the model. If ``json_object`` only ``function`` + type ``tools`` are allowed to be passed to the Run. + If ``text`` the model can return text or any value needed. + + :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and + "json_object". 
+ :vartype type: str or ~azure.ai.project.models.ApiResponseFormat + """ + + type: Optional[Union[str, "_models.ApiResponseFormat"]] = rest_field() + """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\".""" + + @overload + def __init__( + self, + *, + type: Optional[Union[str, "_models.ApiResponseFormat"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentsNamedToolChoice(_model_base.Model): + """Specifies a tool the model should use. Use to force the model to call a specific tool. + + + :ivar type: the type of tool. If type is ``function``\\ , the function name must be set. + Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", + "microsoft_fabric", "sharepoint", and "azure_ai_search". + :vartype type: str or ~azure.ai.project.models.AgentsNamedToolChoiceType + :ivar function: The name of the function to call. + :vartype function: ~azure.ai.project.models.FunctionName + """ + + type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() + """the type of tool. If type is ``function``\ , the function name must be set. Required. Known + values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", + \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\".""" + function: Optional["_models.FunctionName"] = rest_field() + """The name of the function to call.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.AgentsNamedToolChoiceType"], + function: Optional["_models.FunctionName"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentThread(_model_base.Model): + """Information about a single thread associated with an agent. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread'. Required. Default value is "thread". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required. + :vartype tool_resources: ~azure.ai.project.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. 
Required.""" + object: Literal["thread"] = rest_field() + """The object type, which is always 'thread'. Required. Default value is \"thread\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + tool_resources: "_models.ToolResources", + metadata: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread"] = "thread" + + +class AgentThreadCreationOptions(_model_base.Model): + """The details used to create a new agent thread. + + :ivar messages: The initial messages to associate with the new thread. + :vartype messages: list[~azure.ai.project.models.ThreadMessageOptions] + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. + :vartype tool_resources: ~azure.ai.project.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() + """The initial messages to associate with the new thread.""" + tool_resources: Optional["_models.ToolResources"] = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + messages: Optional[List["_models.ThreadMessageOptions"]] = None, + tool_resources: Optional["_models.ToolResources"] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class InputData(_model_base.Model): + """Abstract data class for input data configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ApplicationInsightsConfiguration, Dataset + + + :ivar type: Type of the data. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """Type of the data. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): + """Data Source for Application Insights. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "app_insights". + :vartype type: str + :ivar resource_id: LogAnalytic Workspace resourceID associated with ApplicationInsights. + Required. + :vartype resource_id: str + :ivar query: Query to fetch the data. Required. + :vartype query: str + :ivar service_name: Service name. Required. + :vartype service_name: str + :ivar connection_string: Connection String to connect to ApplicationInsights. + :vartype connection_string: str + """ + + type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"app_insights\".""" + resource_id: str = rest_field(name="resourceId") + """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" + query: str = rest_field() + """Query to fetch the data. Required.""" + service_name: str = rest_field(name="serviceName") + """Service name. Required.""" + connection_string: Optional[str] = rest_field(name="connectionString") + """Connection String to connect to ApplicationInsights.""" + + @overload + def __init__( + self, + *, + resource_id: str, + query: str, + service_name: str, + connection_string: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="app_insights", **kwargs) + + +class AzureAISearchResource(_model_base.Model): + """A set of index resources used by the ``azure_ai_search`` tool. + + :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent. + :vartype index_list: list[~azure.ai.project.models.IndexResource] + """ + + index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") + """The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + index_list: Optional[List["_models.IndexResource"]] = None, + ) -> None: ... 
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ToolDefinition(_model_base.Model):
+ """An abstract representation of an input tool definition that an agent can use.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ AzureAISearchToolDefinition, BingGroundingToolDefinition, CodeInterpreterToolDefinition,
+ FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition,
+ SharepointToolDefinition
+
+
+ :ivar type: The object type. Required. Default value is None.
+ :vartype type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ type: str = rest_discriminator(name="type")
+ """The object type. Required. Default value is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"):
+ """The input definition information for an Azure AI search tool as used to configure an agent.
+
+
+ :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is
+ "azure_ai_search".
+ :vartype type: str
+ """
+
+ type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'azure_ai_search'. Required. Default value is
+ \"azure_ai_search\"."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="azure_ai_search", **kwargs)
+
+
+class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"):
+ """The input definition information for a bing grounding search tool as used to configure an
+ agent.
+
+
+ :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is
+ "bing_grounding".
+ :vartype type: str
+ """
+
+ type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'bing_grounding'. Required. Default value is
+ \"bing_grounding\"."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="bing_grounding", **kwargs)
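
Each concrete ``ToolDefinition`` subclass pins its ``type`` discriminator in ``__init__``, so a caller composes an agent's tools as a plain list and never sets ``type`` by hand. A minimal sketch using subclasses from this file (``CodeInterpreterToolDefinition`` and ``FileSearchToolDefinition`` are defined further down):

    from azure.ai.project.models import (
        BingGroundingToolDefinition,
        CodeInterpreterToolDefinition,
        FileSearchToolDefinition,
    )

    # Serializes to [{"type": "code_interpreter"}, {"type": "bing_grounding"},
    # {"type": "file_search"}].
    tools = [
        CodeInterpreterToolDefinition(),
        BingGroundingToolDefinition(),
        FileSearchToolDefinition(),
    ]

+
+
+class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"):
+ """The input definition information for a code interpreter tool as used to configure an agent.
+
+
+ :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is
+ "code_interpreter".
+ :vartype type: str
+ """
+
+ type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'code_interpreter'. Required.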
Default value is + \"code_interpreter\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class CodeInterpreterToolResource(_model_base.Model): + """A set of resources that are used by the ``code_interpreter`` tool. + + :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can + be a maximum of 20 files + associated with the tool. + :vartype file_ids: list[str] + """ + + file_ids: Optional[List[str]] = rest_field() + """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of + 20 files + associated with the tool.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionListResource(_model_base.Model): + """A set of connection resources currently used by either the ``bing_grounding``\\ , + ``microsoft_fabric``\\ , or ``sharepoint`` tools. + + :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 + connection + resource attached to the agent. + :vartype connection_list: list[~azure.ai.project.models.ConnectionResource] + """ + + connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") + """The connections attached to this agent. There can be a maximum of 1 connection + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ConnectionResource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionProperties(_model_base.Model): + """Connection properties. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth + + + :ivar auth_type: Authentication type of the connection target. Required. Known values are: + "ApiKey", "AAD", and "SAS". + :vartype auth_type: str or ~azure.ai.project.models.AuthenticationType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + auth_type: str = rest_discriminator(name="authType") + """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", + and \"SAS\".""" + + +class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): + """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ + ). + + + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.project.models.AAD + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". 
+ :vartype category: str or ~azure.ai.project.models.ConnectionType
+ :ivar target: The connection URL to be used for this service. Required.
+ :vartype target: str
+ """
+
+ auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore
+ """Authentication type of the connection target. Required. Entra ID authentication"""
+ category: Union[str, "_models.ConnectionType"] = rest_field()
+ """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
+ \"AzureBlob\", and \"AIServices\"."""
+ target: str = rest_field()
+ """The connection URL to be used for this service. Required."""
+
+
+class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"):
+ """Connection properties for connections with API key authentication.
+
+
+ :ivar auth_type: Authentication type of the connection target. Required. API Key authentication
+ :vartype auth_type: str or ~azure.ai.project.models.API_KEY
+ :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI",
+ "Serverless", "AzureBlob", and "AIServices".
+ :vartype category: str or ~azure.ai.project.models.ConnectionType
+ :ivar credentials: Credentials will only be present for authType=ApiKey. Required.
+ :vartype credentials: ~azure.ai.project.models._models.CredentialsApiKeyAuth
+ :ivar target: The connection URL to be used for this service. Required.
+ :vartype target: str
+ """
+
+ auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore
+ """Authentication type of the connection target. Required. API Key authentication"""
+ category: Union[str, "_models.ConnectionType"] = rest_field()
+ """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
+ \"AzureBlob\", and \"AIServices\"."""
+ credentials: "_models._models.CredentialsApiKeyAuth" = rest_field()
+ """Credentials will only be present for authType=ApiKey. Required."""
+ target: str = rest_field()
+ """The connection URL to be used for this service. Required."""
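
Because ``auth_type`` is the discriminator, code that reads a connection's secret can dispatch on it. A minimal sketch, where ``connection`` stands in for a ``ConnectionsListSecretsResponse``-shaped object and the credential models (``CredentialsApiKeyAuth.key``, ``CredentialsSASAuth.sas``) are defined later in this file:

    from azure.ai.project.models import AuthenticationType

    def extract_secret(connection):
        # Dispatch on the discriminated auth type of the connection properties.
        props = connection.properties
        if props.auth_type == AuthenticationType.API_KEY:
            return props.credentials.key
        if props.auth_type == AuthenticationType.SAS:
            return props.credentials.sas
        return None  # AAD (Entra ID passthrough) carries no inline credential.

+
+
+class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"):
+ """Connection properties for connections with SAS authentication.
+
+
+ :ivar auth_type: Authentication type of the connection target. Required. Shared Access
+ Signature (SAS) authentication
+ :vartype auth_type: str or ~azure.ai.project.models.SAS
+ :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI",
+ "Serverless", "AzureBlob", and "AIServices".
+ :vartype category: str or ~azure.ai.project.models.ConnectionType
+ :ivar credentials: Credentials will only be present for authType=SAS. Required.
+ :vartype credentials: ~azure.ai.project.models._models.CredentialsSASAuth
+ :ivar target: The connection URL to be used for this service. Required.
+ :vartype target: str
+ """
+
+ auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore
+ """Authentication type of the connection target. Required. Shared Access Signature (SAS)
+ authentication"""
+ category: Union[str, "_models.ConnectionType"] = rest_field()
+ """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
+ \"AzureBlob\", and \"AIServices\"."""
+ credentials: "_models._models.CredentialsSASAuth" = rest_field()
+ """Credentials will only be present for authType=SAS. Required."""
+ target: str = rest_field()
+ """The connection URL to be used for this service.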
Required.""" + + +class ConnectionResource(_model_base.Model): + """A connection resource. + + + :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field() + """A connection in a ConnectionListResource attached to this agent. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionsListResponse(_model_base.Model): + """Response from the list operation. + + + :ivar value: A list of connection list secrets. Required. + :vartype value: list[~azure.ai.project.models._models.ConnectionsListSecretsResponse] + """ + + value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field() + """A list of connection list secrets. Required.""" + + +class ConnectionsListSecretsResponse(_model_base.Model): + """Response from the listSecrets operation. + + + :ivar id: A unique identifier for the connection. Required. + :vartype id: str + :ivar name: The name of the resource. Required. + :vartype name: str + :ivar properties: The properties of the resource. Required. + :vartype properties: ~azure.ai.project.models._models.ConnectionProperties + """ + + id: str = rest_field() + """A unique identifier for the connection. Required.""" + name: str = rest_field() + """The name of the resource. Required.""" + properties: "_models._models.ConnectionProperties" = rest_field() + """The properties of the resource. Required.""" + + +class CredentialsApiKeyAuth(_model_base.Model): + """The credentials needed for API key authentication. + + + :ivar key: The API key. Required. + :vartype key: str + """ + + key: str = rest_field() + """The API key. Required.""" + + +class CredentialsSASAuth(_model_base.Model): + """The credentials needed for Shared Access Signatures (SAS) authentication. + + + :ivar sas: The Shared Access Signatures (SAS) token. Required. + :vartype sas: str + """ + + sas: str = rest_field(name="SAS") + """The Shared Access Signatures (SAS) token. Required.""" + + +class Trigger(_model_base.Model): + """Abstract data class for input data configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CronTrigger, RecurrenceTrigger + + + :ivar type: Type of the trigger. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """Type of the trigger. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CronTrigger(Trigger, discriminator="Cron"): + """Cron Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Cron". + :vartype type: str + :ivar expression: Cron expression for the trigger. Required. 
+ :vartype expression: str + """ + + type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"Cron\".""" + expression: str = rest_field() + """Cron expression for the trigger. Required.""" + + @overload + def __init__( + self, + *, + expression: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="Cron", **kwargs) + + +class Dataset(InputData, discriminator="dataset"): + """Dataset as source for evaluation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "dataset". + :vartype type: str + :ivar id: Evaluation input data. Required. + :vartype id: str + """ + + type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"dataset\".""" + id: str = rest_field() + """Evaluation input data. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="dataset", **kwargs) + + +class Evaluation(_model_base.Model): + """Evaluation Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: Identifier of the evaluation. Required. + :vartype id: str + :ivar data: Data for evaluation. Required. + :vartype data: ~azure.ai.project.models.InputData + :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI + Studio. It does not need to be unique. + :vartype display_name: str + :ivar description: Description of the evaluation. It can be used to store additional + information about the evaluation and is mutable. + :vartype description: str + :ivar system_data: Metadata containing createdBy and modifiedBy information. + :vartype system_data: ~azure.ai.project.models.SystemData + :ivar status: Status of the evaluation. It is set by service and is read-only. + :vartype status: str + :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. + :vartype tags: dict[str, str] + :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a + property cannot be removed. + :vartype properties: dict[str, str] + :ivar evaluators: Evaluators to be used for the evaluation. Required. + :vartype evaluators: dict[str, ~azure.ai.project.models.EvaluatorConfiguration] + """ + + id: str = rest_field(visibility=["read"]) + """Identifier of the evaluation. Required.""" + data: "_models.InputData" = rest_field(visibility=["read", "create"]) + """Data for evaluation. Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need + to be unique.""" + description: Optional[str] = rest_field() + """Description of the evaluation. 
It can be used to store additional information about the + evaluation and is mutable.""" + system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) + """Metadata containing createdBy and modifiedBy information.""" + status: Optional[str] = rest_field(visibility=["read"]) + """Status of the evaluation. It is set by service and is read-only.""" + tags: Optional[Dict[str, str]] = rest_field() + """Evaluation's tags. Unlike properties, tags are fully mutable.""" + properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) + """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be + removed.""" + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) + """Evaluators to be used for the evaluation. Required.""" + + @overload + def __init__( + self, + *, + data: "_models.InputData", + evaluators: Dict[str, "_models.EvaluatorConfiguration"], + display_name: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + properties: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EvaluationSchedule(_model_base.Model): + """Evaluation Schedule Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :vartype name: str + :ivar data: Data for evaluation. Required. + :vartype data: ~azure.ai.project.models.ApplicationInsightsConfiguration + :ivar description: Description of the evaluation. It can be used to store additional + information about the evaluation and is mutable. + :vartype description: str + :ivar system_data: Metadata containing createdBy and modifiedBy information. + :vartype system_data: ~azure.ai.project.models.SystemData + :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. + :vartype provisioning_status: str + :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. + :vartype tags: dict[str, str] + :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a + property cannot be removed. + :vartype properties: dict[str, str] + :ivar evaluators: Evaluators to be used for the evaluation. Required. + :vartype evaluators: dict[str, ~azure.ai.project.models.EvaluatorConfiguration] + :ivar trigger: Trigger for the evaluation. Required. + :vartype trigger: ~azure.ai.project.models.Trigger + :ivar sampling_strategy: Sampling strategy for the evaluation. Required. + :vartype sampling_strategy: ~azure.ai.project.models.SamplingStrategy + """ + + name: str = rest_field(visibility=["read"]) + """Name of the schedule, which also serves as the unique identifier for the evaluation. Required.""" + data: "_models.ApplicationInsightsConfiguration" = rest_field(visibility=["read", "create"]) + """Data for evaluation. Required.""" + description: Optional[str] = rest_field() + """Description of the evaluation. 
It can be used to store additional information about the
+ evaluation and is mutable."""
+ system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"])
+ """Metadata containing createdBy and modifiedBy information."""
+ provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"])
+ """Status of the evaluation. It is set by service and is read-only."""
+ tags: Optional[Dict[str, str]] = rest_field()
+ """Evaluation's tags. Unlike properties, tags are fully mutable."""
+ properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"])
+ """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be
+ removed."""
+ evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"])
+ """Evaluators to be used for the evaluation. Required."""
+ trigger: "_models.Trigger" = rest_field()
+ """Trigger for the evaluation. Required."""
+ sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy")
+ """Sampling strategy for the evaluation. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ data: "_models.ApplicationInsightsConfiguration",
+ evaluators: Dict[str, "_models.EvaluatorConfiguration"],
+ trigger: "_models.Trigger",
+ sampling_strategy: "_models.SamplingStrategy",
+ description: Optional[str] = None,
+ tags: Optional[Dict[str, str]] = None,
+ properties: Optional[Dict[str, str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class EvaluatorConfiguration(_model_base.Model):
+ """Evaluator Configuration.
+
+
+ :ivar id: Identifier of the evaluator. Required.
+ :vartype id: str
+ :ivar init_params: Initialization parameters of the evaluator.
+ :vartype init_params: dict[str, any]
+ :ivar data_mapping: Data parameters of the evaluator.
+ :vartype data_mapping: dict[str, str]
+ """
+
+ id: str = rest_field()
+ """Identifier of the evaluator. Required."""
+ init_params: Optional[Dict[str, Any]] = rest_field(name="initParams")
+ """Initialization parameters of the evaluator."""
+ data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping")
+ """Data parameters of the evaluator."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ init_params: Optional[Dict[str, Any]] = None,
+ data_mapping: Optional[Dict[str, str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
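
``Evaluation``, ``EvaluatorConfiguration``, and the ``InputData`` variants compose as follows. A minimal sketch in which the evaluator id and ``data_mapping`` entries are placeholders, not values defined by this patch:

    from azure.ai.project.models import Dataset, Evaluation, EvaluatorConfiguration

    evaluation = Evaluation(
        data=Dataset(id="<dataset-id>"),
        evaluators={
            # Keyed by a caller-chosen name; each value identifies one evaluator asset.
            "relevance": EvaluatorConfiguration(
                id="<evaluator-asset-id>",
                data_mapping={"query": "${data.query}", "response": "${data.response}"},
            ),
        },
        display_name="nightly-relevance",
    )

+
+
+class FileContentResponse(_model_base.Model):
+ """A response from a file get content operation.
+
+
+ :ivar content: The content of the file, in bytes. Required.
+ :vartype content: bytes
+ """
+
+ content: bytes = rest_field(format="base64")
+ """The content of the file, in bytes. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ content: bytes,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.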
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileDeletionStatus(_model_base.Model): + """A status response from a file deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["file"] = rest_field() + """The object type, which is always 'file'. Required. Default value is \"file\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class FileListResponse(_model_base.Model): + """The response data from a file list operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always 'list'. Required. Default value is "list". + :vartype object: str + :ivar data: The files returned for the request. Required. + :vartype data: list[~azure.ai.project.models.OpenAIFile] + """ + + object: Literal["list"] = rest_field() + """The object type, which is always 'list'. Required. Default value is \"list\".""" + data: List["_models.OpenAIFile"] = rest_field() + """The files returned for the request. Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.OpenAIFile"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): + """The input definition information for a file search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Options overrides for the file search tool. + :vartype file_search: ~azure.ai.project.models.FileSearchToolDefinitionDetails + """ + + type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() + """Options overrides for the file search tool.""" + + @overload + def __init__( + self, + *, + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class FileSearchToolDefinitionDetails(_model_base.Model): + """Options overrides for the file search tool. + + :ivar max_num_results: The maximum number of results the file search tool should output. The + default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 + inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information. + :vartype max_num_results: int + """ + + max_num_results: Optional[int] = rest_field() + """The maximum number of results the file search tool should output. The default is 20 for gpt-4* + models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. + + Note that the file search tool may output fewer than ``max_num_results`` results. See the file + search tool documentation for more information.""" + + @overload + def __init__( + self, + *, + max_num_results: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileSearchToolResource(_model_base.Model): + """A set of resources that are used by the ``file_search`` tool. + + :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a + maximum of 1 vector + store attached to the agent. + :vartype vector_store_ids: list[str] + """ + + vector_store_ids: Optional[List[str]] = rest_field() + """The ID of the vector store attached to this agent. There can be a maximum of 1 vector + store attached to the agent.""" + + @overload + def __init__( + self, + *, + vector_store_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FunctionDefinition(_model_base.Model): + """The input definition information for a function. + + + :ivar name: The name of the function to be called. Required. + :vartype name: str + :ivar description: A description of what the function does, used by the model to choose when + and how to call the function. + :vartype description: str + :ivar parameters: The parameters the functions accepts, described as a JSON Schema object. + Required. + :vartype parameters: any + """ + + name: str = rest_field() + """The name of the function to be called. Required.""" + description: Optional[str] = rest_field() + """A description of what the function does, used by the model to choose when and how to call the + function.""" + parameters: Any = rest_field() + """The parameters the functions accepts, described as a JSON Schema object. Required.""" + + @overload + def __init__( + self, + *, + name: str, + parameters: Any, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionName(_model_base.Model):
+ """The function name that will be used, if using the ``function`` tool.
+
+
+ :ivar name: The name of the function to call. Required.
+ :vartype name: str
+ """
+
+ name: str = rest_field()
+ """The name of the function to call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class FunctionToolDefinition(ToolDefinition, discriminator="function"):
+ """The input definition information for a function tool as used to configure an agent.
+
+
+ :ivar type: The object type, which is always 'function'. Required. Default value is "function".
+ :vartype type: str
+ :ivar function: The definition of the concrete function that the function tool should call.
+ Required.
+ :vartype function: ~azure.ai.project.models.FunctionDefinition
+ """
+
+ type: Literal["function"] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'function'. Required. Default value is \"function\"."""
+ function: "_models.FunctionDefinition" = rest_field()
+ """The definition of the concrete function that the function tool should call. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ function: "_models.FunctionDefinition",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type="function", **kwargs)
+
+
+class IndexResource(_model_base.Model):
+ """An index resource.
+
+
+ :ivar index_connection_id: An index connection id in an IndexResource attached to this agent.
+ Required.
+ :vartype index_connection_id: str
+ :ivar index_name: The name of an index in an IndexResource attached to this agent. Required.
+ :vartype index_name: str
+ """
+
+ index_connection_id: str = rest_field()
+ """An index connection id in an IndexResource attached to this agent. Required."""
+ index_name: str = rest_field()
+ """The name of an index in an IndexResource attached to this agent. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ index_connection_id: str,
+ index_name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
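
``FunctionToolDefinition`` above is the one tool variant that carries a payload: a ``FunctionDefinition`` whose ``parameters`` field holds a JSON-Schema-shaped object. A minimal sketch with an illustrative schema:

    from azure.ai.project.models import FunctionDefinition, FunctionToolDefinition

    # The "function" discriminator is pinned by FunctionToolDefinition.__init__.
    get_weather = FunctionToolDefinition(
        function=FunctionDefinition(
            name="get_weather",
            description="Look up the current weather for a city.",
            parameters={
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        )
    )

+
+
+class MessageAttachment(_model_base.Model):
+ """This describes to which tools a file has been attached.
+
+
+ :ivar file_id: The ID of the file to attach to the message. Required.
+ :vartype file_id: str
+ :ivar tools: The tools to add to this file. Required.
+ :vartype tools: list[~azure.ai.project.models.CodeInterpreterToolDefinition or
+ ~azure.ai.project.models.FileSearchToolDefinition]
+ """
+
+ file_id: str = rest_field()
+ """The ID of the file to attach to the message. Required."""
+ tools: List["_types.MessageAttachmentToolDefinition"] = rest_field()
+ """The tools to add to this file.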
Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + tools: List["_types.MessageAttachmentToolDefinition"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageContent(_model_base.Model): + """An abstract representation of a single item of thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageImageFileContent, MessageTextContent + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDelta(_model_base.Model): + """Represents the typed 'delta' payload within a streaming message delta chunk. + + + :ivar role: The entity that produced the message. Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.project.models.MessageRole + :ivar content: The content of the message as an array of text and/or images. Required. + :vartype content: list[~azure.ai.project.models.MessageDeltaContent] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" + content: List["_models.MessageDeltaContent"] = rest_field() + """The content of the message as an array of text and/or images. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageDeltaContent"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaChunk(_model_base.Model): + """Represents a message delta i.e. any changed fields on a message during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.message.delta``. Required. Default + value is "thread.message.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the Message. Required. + :vartype delta: ~azure.ai.project.models.MessageDelta + """ + + id: str = rest_field() + """The identifier of the message, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message.delta"] = rest_field() + """The object type, which is always ``thread.message.delta``. Required. Default value is + \"thread.message.delta\".""" + delta: "_models.MessageDelta" = rest_field() + """The delta containing the fields that have changed on the Message. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.MessageDelta", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message.delta"] = "thread.message.delta" + + +class MessageDeltaContent(_model_base.Model): + """The abstract base representation of a partial streamed message content payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaImageFileContent, MessageDeltaTextContent + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field() + """The index of the content part of the message. Required.""" + type: str = rest_discriminator(name="type") + """The type of content for this content part. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"): + """Represents a streamed image file content part within a streaming message delta chunk. + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part, which is always "image_file.". Required. + Default value is "image_file". + :vartype type: str + :ivar image_file: The image_file data. + :vartype image_file: ~azure.ai.project.models.MessageDeltaImageFileContentObject + """ + + type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + """The type of content for this content part, which is always \"image_file.\". Required. Default + value is \"image_file\".""" + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field() + """The image_file data.""" + + @overload + def __init__( + self, + *, + index: int, + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageDeltaImageFileContentObject(_model_base.Model): + """Represents the 'image_file' payload within streaming image file content. + + :ivar file_id: The file ID of the image in the message content. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field() + """The file ID of the image in the message content.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextAnnotation(_model_base.Model): + """The abstract base representation of a streamed text content part's text annotation. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation + + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The type of the text content annotation. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field() + """The index of the annotation within a text content part. Required.""" + type: str = rest_discriminator(name="type") + """The type of the text content annotation. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"): + """Represents a streamed text content part within a streaming message delta chunk. + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part, which is always "text.". Required. + Default value is "text". + :vartype type: str + :ivar text: The text content details. + :vartype text: ~azure.ai.project.models.MessageDeltaTextContentObject + """ + + type: Literal["text"] = rest_discriminator(name="type") # type: ignore + """The type of content for this content part, which is always \"text.\". Required. Default value + is \"text\".""" + text: Optional["_models.MessageDeltaTextContentObject"] = rest_field() + """The text content details.""" + + @overload + def __init__( + self, + *, + index: int, + text: Optional["_models.MessageDeltaTextContentObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageDeltaTextContentObject(_model_base.Model): + """Represents the data of a streamed text content part within a streaming message delta chunk. + + :ivar value: The data that makes up the text. + :vartype value: str + :ivar annotations: Annotations for the text. + :vartype annotations: list[~azure.ai.project.models.MessageDeltaTextAnnotation] + """ + + value: Optional[str] = rest_field() + """The data that makes up the text.""" + annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field() + """Annotations for the text.""" + + @overload + def __init__( + self, + *, + value: Optional[str] = None, + annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"): + """Represents a streamed file citation applied to a streaming text content part. + + + :ivar index: The index of the annotation within a text content part. Required. + :vartype index: int + :ivar type: The type of the text content annotation, which is always "file_citation.". + Required. Default value is "file_citation". + :vartype type: str + :ivar file_citation: The file citation information. + :vartype file_citation: ~azure.ai.project.models.MessageDeltaTextFileCitationAnnotationObject + :ivar text: The text in the message content that needs to be replaced. + :vartype text: str + :ivar start_index: The start index of this annotation in the content text. + :vartype start_index: int + :ivar end_index: The end index of this annotation in the content text. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + """The type of the text content annotation, which is always \"file_citation.\". Required. Default + value is \"file_citation\".""" + file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field() + """The file citation information.""" + text: Optional[str] = rest_field() + """The text in the message content that needs to be replaced.""" + start_index: Optional[int] = rest_field() + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field() + """The end index of this annotation in the content text.""" + + @overload + def __init__( + self, + *, + index: int, + file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None, + text: Optional[str] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the data of a streamed file citation as applied to a streaming text content part. + + :ivar file_id: The ID of the specific file the citation is from. + :vartype file_id: str + :ivar quote: The specific quote in the cited file. + :vartype quote: str + """ + + file_id: Optional[str] = rest_field() + """The ID of the specific file the citation is from.""" + quote: Optional[str] = rest_field() + """The specific quote in the cited file.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + quote: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"): + """Represents a streamed file path annotation applied to a streaming text content part. + + + :ivar index: The index of the annotation within a text content part. Required. 
+ :vartype index: int + :ivar type: The type of the text content annotation, which is always "file_path.". Required. + Default value is "file_path". + :vartype type: str + :ivar file_path: The file path information. + :vartype file_path: ~azure.ai.project.models.MessageDeltaTextFilePathAnnotationObject + :ivar start_index: The start index of this annotation in the content text. + :vartype start_index: int + :ivar end_index: The end index of this annotation in the content text. + :vartype end_index: int + :ivar text: The text in the message content that needs to be replaced. + :vartype text: str + """ + + type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore + """The type of the text content annotation, which is always \"file_path.\". Required. Default + value is \"file_path\".""" + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field() + """The file path information.""" + start_index: Optional[int] = rest_field() + """The start index of this annotation in the content text.""" + end_index: Optional[int] = rest_field() + """The end index of this annotation in the content text.""" + text: Optional[str] = rest_field() + """The text in the message content that needs to be replaced.""" + + @overload + def __init__( + self, + *, + index: int, + file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, + start_index: Optional[int] = None, + end_index: Optional[int] = None, + text: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_path", **kwargs) + + +class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): + """Represents the data of a streamed file path annotation as applied to a streaming text content + part. + + :ivar file_id: The file ID for the annotation. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field() + """The file ID for the annotation.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageImageFileContent(MessageContent, discriminator="image_file"): + """A representation of image file content in a thread message. + + + :ivar type: The object type, which is always 'image_file'. Required. Default value is + "image_file". + :vartype type: str + :ivar image_file: The image file for this thread message content item. Required. + :vartype image_file: ~azure.ai.project.models.MessageImageFileDetails + """ + + type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" + image_file: "_models.MessageImageFileDetails" = rest_field() + """The image file for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + image_file: "_models.MessageImageFileDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
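Because the `type` discriminator causes deserialization to produce the matching subclass, consumers can branch on streamed annotations with plain isinstance checks. A minimal sketch under that assumption:

from azure.ai.client.models import (
    MessageDeltaTextAnnotation,
    MessageDeltaTextFileCitationAnnotation,
    MessageDeltaTextFilePathAnnotation,
)

def describe(annotation: MessageDeltaTextAnnotation) -> str:
    """Render a short human-readable label for a streamed text annotation."""
    if isinstance(annotation, MessageDeltaTextFileCitationAnnotation):
        cite = annotation.file_citation
        return f"citation from file {cite.file_id if cite else '?'}"
    if isinstance(annotation, MessageDeltaTextFilePathAnnotation):
        path = annotation.file_path
        return f"generated file {path.file_id if path else '?'}"
    return f"unrecognized annotation type {annotation.type!r}"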
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageImageFileDetails(_model_base.Model): + """An image reference, as represented in thread message content. + + + :ivar file_id: The ID for the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID for the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageIncompleteDetails(_model_base.Model): + """Information providing additional detail about a message entering an incomplete status. + + + :ivar reason: The provided reason describing why the message was marked as incomplete. + Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and + "run_expired". + :vartype reason: str or ~azure.ai.project.models.MessageIncompleteDetailsReason + """ + + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() + """The provided reason describing why the message was marked as incomplete. Required. Known values + are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and + \"run_expired\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.MessageIncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextAnnotation(_model_base.Model): + """An abstract representation of an annotation to text thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + text: str = rest_field() + """The textual content associated with this text annotation item. Required.""" + + @overload + def __init__( + self, + *, + type: str, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextContent(MessageContent, discriminator="text"): + """A representation of a textual item of thread message content. + + + :ivar type: The object type, which is always 'text'. Required. Default value is "text". + :vartype type: str + :ivar text: The text and associated annotations for this thread message content item. Required. 
+ :vartype text: ~azure.ai.project.models.MessageTextDetails + """ + + type: Literal["text"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'text'. Required. Default value is \"text\".""" + text: "_models.MessageTextDetails" = rest_field() + """The text and associated annotations for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + text: "_models.MessageTextDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageTextDetails(_model_base.Model): + """The text and associated annotations for a single item of agent thread message content. + + + :ivar value: The text data. Required. + :vartype value: str + :ivar annotations: A list of annotations associated with this text. Required. + :vartype annotations: list[~azure.ai.project.models.MessageTextAnnotation] + """ + + value: str = rest_field() + """The text data. Required.""" + annotations: List["_models.MessageTextAnnotation"] = rest_field() + """A list of annotations associated with this text. Required.""" + + @overload + def __init__( + self, + *, + value: str, + annotations: List["_models.MessageTextAnnotation"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): + """A citation within the message that points to a specific quote from a specific File associated + with the agent or the message. Generated when the agent uses the 'file_search' tool to search + files. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_citation'. Required. Default value is + "file_citation". + :vartype type: str + :ivar file_citation: A citation within the message that points to a specific quote from a + specific file. + Generated when the agent uses the "file_search" tool to search files. Required. + :vartype file_citation: ~azure.ai.project.models.MessageTextFileCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" + file_citation: "_models.MessageTextFileCitationDetails" = rest_field() + """A citation within the message that points to a specific quote from a specific file. + Generated when the agent uses the \"file_search\" tool to search files. 
Required.""" + start_index: Optional[int] = rest_field() + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field() + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_citation: "_models.MessageTextFileCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageTextFileCitationDetails(_model_base.Model): + """A representation of a file-based text citation, as used in a file-based annotation of text + thread message content. + + + :ivar file_id: The ID of the file associated with this citation. Required. + :vartype file_id: str + :ivar quote: The specific quote cited in the associated file. Required. + :vartype quote: str + """ + + file_id: str = rest_field() + """The ID of the file associated with this citation. Required.""" + quote: str = rest_field() + """The specific quote cited in the associated file. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + quote: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): + """A citation within the message that points to a file located at a specific path. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_path'. Required. Default value is + "file_path". + :vartype type: str + :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter + tool to generate a file. Required. + :vartype file_path: ~azure.ai.project.models.MessageTextFilePathDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" + file_path: "_models.MessageTextFilePathDetails" = rest_field() + """A URL for the file that's generated when the agent used the code_interpreter tool to generate a + file. Required.""" + start_index: Optional[int] = rest_field() + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field() + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_path: "_models.MessageTextFilePathDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+    :type mapping: Mapping[str, Any]
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_path", **kwargs)
+
+
+class MessageTextFilePathDetails(_model_base.Model):
+    """An encapsulation of a file ID, as used by a file path annotation of text thread message
+    content.
+
+
+    :ivar file_id: The ID of the specific file that the citation is from. Required.
+    :vartype file_id: str
+    """
+
+    file_id: str = rest_field()
+    """The ID of the specific file that the citation is from. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"):
+    """The input definition information for a Microsoft Fabric tool as used to configure an agent.
+
+
+    :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is
+     "microsoft_fabric".
+    :vartype type: str
+    """
+
+    type: Literal["microsoft_fabric"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'microsoft_fabric'. Required. Default value is
+     \"microsoft_fabric\"."""
+
+    @overload
+    def __init__(
+        self,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="microsoft_fabric", **kwargs)
+
+
+class OpenAIFile(_model_base.Model):
+    """Represents a file that has been uploaded to the service.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar object: The object type, which is always 'file'. Required. Default value is "file".
+    :vartype object: str
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar bytes: The size of the file, in bytes. Required.
+    :vartype bytes: int
+    :ivar filename: The name of the file. Required.
+    :vartype filename: str
+    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
+     Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune",
+     "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision".
+    :vartype purpose: str or ~azure.ai.project.models.FilePurpose
+    :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values
+     are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted".
+    :vartype status: str or ~azure.ai.project.models.FileState
+    :ivar status_details: The error message with details in case processing of this file failed.
+     This field is available in Azure OpenAI only.
+    :vartype status_details: str
+    """
+
+    object: Literal["file"] = rest_field()
+    """The object type, which is always 'file'. Required. Default value is \"file\"."""
+    id: str = rest_field()
+    """The identifier, which can be referenced in API endpoints. Required."""
+    bytes: int = rest_field()
+    """The size of the file, in bytes. Required."""
+    filename: str = rest_field()
+    """The name of the file.
Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + purpose: Union[str, "_models.FilePurpose"] = rest_field() + """The intended purpose of a file. Required. Known values are: \"fine-tune\", + \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and + \"vision\".""" + status: Optional[Union[str, "_models.FileState"]] = rest_field() + """The state of the file. This field is available in Azure OpenAI only. Known values are: + \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and + \"deleted\".""" + status_details: Optional[str] = rest_field() + """The error message with details in case processing of this file failed. This field is available + in Azure OpenAI only.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bytes: int, + filename: str, + created_at: datetime.datetime, + purpose: Union[str, "_models.FilePurpose"], + status: Optional[Union[str, "_models.FileState"]] = None, + status_details: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class OpenAIPageableListOfAgent(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.Agent] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.Agent"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.Agent"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfRunStep(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". 
+ :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.RunStep] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.RunStep"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.RunStep"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadMessage(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.ThreadMessage] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadMessage"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadMessage"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadRun(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". 
+ :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.ThreadRun] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadRun"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadRun"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStore(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.VectorStore] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStore"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStore"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStoreFile(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. 
Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.project.models.VectorStoreFile] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStoreFile"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStoreFile"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class RecurrenceSchedule(_model_base.Model): + """RecurrenceSchedule Definition. + + + :ivar hours: List of hours for the schedule. Required. + :vartype hours: list[int] + :ivar minutes: List of minutes for the schedule. Required. + :vartype minutes: list[int] + :ivar week_days: List of days for the schedule. + :vartype week_days: list[str or ~azure.ai.project.models.WeekDays] + :ivar month_days: List of month days for the schedule. + :vartype month_days: list[int] + """ + + hours: List[int] = rest_field() + """List of hours for the schedule. Required.""" + minutes: List[int] = rest_field() + """List of minutes for the schedule. Required.""" + week_days: Optional[List[Union[str, "_models.WeekDays"]]] = rest_field(name="weekDays") + """List of days for the schedule.""" + month_days: Optional[List[int]] = rest_field(name="monthDays") + """List of month days for the schedule.""" + + @overload + def __init__( + self, + *, + hours: List[int], + minutes: List[int], + week_days: Optional[List[Union[str, "_models.WeekDays"]]] = None, + month_days: Optional[List[int]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RecurrenceTrigger(Trigger, discriminator="Recurrence"): + """Recurrence Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Recurrence". + :vartype type: str + :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", + "Week", "Day", "Hour", and "Minute". + :vartype frequency: str or ~azure.ai.project.models.Frequency + :ivar interval: Specifies schedule interval in conjunction with frequency. Required. + :vartype interval: int + :ivar schedule: The recurrence schedule. Required. 
+    :vartype schedule: ~azure.ai.project.models.RecurrenceSchedule
+    """
+
+    type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"])  # type: ignore
+    """Required. Default value is \"Recurrence\"."""
+    frequency: Union[str, "_models.Frequency"] = rest_field()
+    """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\",
+     \"Hour\", and \"Minute\"."""
+    interval: int = rest_field()
+    """Specifies schedule interval in conjunction with frequency. Required."""
+    schedule: "_models.RecurrenceSchedule" = rest_field()
+    """The recurrence schedule. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        frequency: Union[str, "_models.Frequency"],
+        interval: int,
+        schedule: "_models.RecurrenceSchedule",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="Recurrence", **kwargs)
+
+
+class RequiredAction(_model_base.Model):
+    """An abstract representation of a required action for an agent thread run to continue.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    SubmitToolOutputsAction
+
+
+    :ivar type: The object type. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RequiredToolCall(_model_base.Model):
+    """An abstract representation of a tool invocation needed by the model to continue a run.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RequiredFunctionToolCall
+
+
+    :ivar type: The object type for the required tool call. Required. Default value is None.
+    :vartype type: str
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the required tool call. Required. Default value is None."""
+    id: str = rest_field()
+    """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+        id: str,  # pylint: disable=redefined-builtin
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
+    """A representation of a requested call to a function tool, needed by the model to continue
+    evaluation of a run.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type of the required tool call. Always 'function' for function tools.
+     Required.
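A minimal sketch of composing the two scheduling models above (the week-day strings are assumed to match the WeekDays enum values, which the `Union[str, ...]` typing permits):

from azure.ai.client.models import RecurrenceSchedule, RecurrenceTrigger

# Run on the listed hours and minutes of two week days, once per week.
schedule = RecurrenceSchedule(
    hours=[9, 17],
    minutes=[0, 30],
    week_days=["Monday", "Thursday"],
)
trigger = RecurrenceTrigger(frequency="Week", interval=1, schedule=schedule)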
Default value is "function". + :vartype type: str + :ivar function: Detailed information about the function to be executed by the tool that + includes name and arguments. Required. + :vartype function: ~azure.ai.project.models.RequiredFunctionToolCallDetails + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object type of the required tool call. Always 'function' for function tools. Required. + Default value is \"function\".""" + function: "_models.RequiredFunctionToolCallDetails" = rest_field() + """Detailed information about the function to be executed by the tool that includes name and + arguments. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + function: "_models.RequiredFunctionToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class RequiredFunctionToolCallDetails(_model_base.Model): + """The detailed information for a function invocation, as provided by a required action invoking a + function tool, that includes the name of and arguments to the function. + + + :ivar name: The name of the function. Required. + :vartype name: str + :ivar arguments: The arguments to use when invoking the named function, as provided by the + model. Arguments are presented as a JSON document that should be validated and parsed for + evaluation. Required. + :vartype arguments: str + """ + + name: str = rest_field() + """The name of the function. Required.""" + arguments: str = rest_field() + """The arguments to use when invoking the named function, as provided by the model. Arguments are + presented as a JSON document that should be validated and parsed for evaluation. Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunCompletionUsage(_model_base.Model): + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). + + + :ivar completion_tokens: Number of completion tokens used over the course of the run. Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """Number of completion tokens used over the course of the run. Required.""" + prompt_tokens: int = rest_field() + """Number of prompt tokens used over the course of the run. Required.""" + total_tokens: int = rest_field() + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunError(_model_base.Model): + """The details of an error as encountered by an agent thread run. + + + :ivar code: The status for the error. Required. + :vartype code: str + :ivar message: The human-readable text associated with the error. Required. + :vartype message: str + """ + + code: str = rest_field() + """The status for the error. Required.""" + message: str = rest_field() + """The human-readable text associated with the error. Required.""" + + @overload + def __init__( + self, + *, + code: str, + message: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStep(_model_base.Model): + """Detailed information about a single step of an agent thread run. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is + "thread.run.step". + :vartype object: str + :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. + Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.project.models.RunStepType + :ivar assistant_id: The ID of the agent associated with the run step. Required. + :vartype assistant_id: str + :ivar thread_id: The ID of the thread that was run. Required. + :vartype thread_id: str + :ivar run_id: The ID of the run that this run step is a part of. Required. + :vartype run_id: str + :ivar status: The status of this run step. Required. Known values are: "in_progress", + "cancelled", "failed", "completed", and "expired". + :vartype status: str or ~azure.ai.project.models.RunStepStatus + :ivar step_details: The details for this run step. Required. + :vartype step_details: ~azure.ai.project.models.RunStepDetails + :ivar last_error: If applicable, information about the last error encountered by this run step. + Required. + :vartype last_error: ~azure.ai.project.models.RunStepError + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. + Required. + :vartype expired_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the + run step's status is ``in_progress``. + :vartype usage: ~azure.ai.project.models.RunStepCompletionUsage + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step"] = rest_field() + """The object type, which is always 'thread.run.step'. Required. Default value is + \"thread.run.step\".""" + type: Union[str, "_models.RunStepType"] = rest_field() + """The type of run step, which can be either message_creation or tool_calls. Required. Known + values are: \"message_creation\" and \"tool_calls\".""" + assistant_id: str = rest_field() + """The ID of the agent associated with the run step. Required.""" + thread_id: str = rest_field() + """The ID of the thread that was run. Required.""" + run_id: str = rest_field() + """The ID of the run that this run step is a part of. Required.""" + status: Union[str, "_models.RunStepStatus"] = rest_field() + """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", + \"failed\", \"completed\", and \"expired\".""" + step_details: "_models.RunStepDetails" = rest_field() + """The details for this run step. Required.""" + last_error: "_models.RunStepError" = rest_field() + """If applicable, information about the last error encountered by this run step. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expired_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item expired. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this failed. Required.""" + usage: Optional["_models.RunStepCompletionUsage"] = rest_field() + """Usage statistics related to the run step. This value will be ``null`` while the run step's + status is ``in_progress``.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + type: Union[str, "_models.RunStepType"], + assistant_id: str, + thread_id: str, + run_id: str, + status: Union[str, "_models.RunStepStatus"], + step_details: "_models.RunStepDetails", + last_error: "_models.RunStepError", + created_at: datetime.datetime, + expired_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + metadata: Dict[str, str], + usage: Optional["_models.RunStepCompletionUsage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
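A small sketch of reading a deserialized RunStep, reflecting the note above that `usage` stays unset until the step leaves ``in_progress``:

from azure.ai.client.models import RunStep

def summarize(step: RunStep) -> str:
    """One-line summary of a run step's kind, status, and token usage."""
    tokens = step.usage.total_tokens if step.usage else "n/a"
    return f"{step.type} [{step.status}] total_tokens={tokens}"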
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run.step"] = "thread.run.step" + + +class RunStepToolCall(_model_base.Model): + """An abstract representation of a detailed tool call as recorded within a run step for an + existing run. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, + RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, + RunStepSharepointToolCall + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + id: str = rest_field() + """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"): + """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined + tool, that represents + executed Azure AI search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + :ivar azure_ai_search: Reserved for future use. Required. + :vartype azure_ai_search: dict[str, str] + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + azure_ai_search: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + azure_ai_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): + """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined + tool, that represents + executed search with bing grounding. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". + :vartype type: str + :ivar bing_grounding: Reserved for future use. Required. 
+ :vartype bing_grounding: dict[str, str] + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_grounding: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class RunStepCodeInterpreterToolCallOutput(_model_base.Model): + """An abstract representation of an emitted output from a code interpreter tool. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): + """A representation of an image output emitted by a code interpreter tool in response to a tool + call by the model. + + + :ivar type: The object type, which is always 'image'. Required. Default value is "image". + :vartype type: str + :ivar image: Referential information for the image associated with this output. Required. + :vartype image: ~azure.ai.project.models.RunStepCodeInterpreterImageReference + """ + + type: Literal["image"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'image'. Required. Default value is \"image\".""" + image: "_models.RunStepCodeInterpreterImageReference" = rest_field() + """Referential information for the image associated with this output. Required.""" + + @overload + def __init__( + self, + *, + image: "_models.RunStepCodeInterpreterImageReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepCodeInterpreterImageReference(_model_base.Model): + """An image reference emitted by a code interpreter tool in response to a tool call by the model. + + + :ivar file_id: The ID of the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID of the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): + """A representation of a log output emitted by a code interpreter tool in response to a tool call + by the model. + + + :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". + :vartype type: str + :ivar logs: The serialized log output emitted by the code interpreter. Required. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'logs'. Required. Default value is \"logs\".""" + logs: str = rest_field() + """The serialized log output emitted by the code interpreter. Required.""" + + @overload + def __init__( + self, + *, + logs: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): + """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined + tool, that + represents inputs and outputs consumed and emitted by the code interpreter. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. + :vartype code_interpreter: ~azure.ai.project.models.RunStepCodeInterpreterToolCallDetails + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() + """The details of the tool call to the code interpreter tool. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepCodeInterpreterToolCallDetails(_model_base.Model): + """The detailed information about a code interpreter invocation by the model. + + + :ivar input: The input provided by the model to the code interpreter tool. Required. + :vartype input: str + :ivar outputs: The outputs produced by the code interpreter tool back to the model in response + to the tool call. Required. + :vartype outputs: list[~azure.ai.project.models.RunStepCodeInterpreterToolCallOutput] + """ + + input: str = rest_field() + """The input provided by the model to the code interpreter tool. Required.""" + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() + """The outputs produced by the code interpreter tool back to the model in response to the tool + call. 
Required.""" + + @overload + def __init__( + self, + *, + input: str, + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCompletionUsage(_model_base.Model): + """Usage statistics related to the run step. + + + :ivar completion_tokens: Number of completion tokens used over the course of the run step. + Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """Number of completion tokens used over the course of the run step. Required.""" + prompt_tokens: int = rest_field() + """Number of prompt tokens used over the course of the run step. Required.""" + total_tokens: int = rest_field() + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDelta(_model_base.Model): + """Represents the delta payload in a streaming run step delta chunk. + + :ivar step_details: The details of the run step. + :vartype step_details: ~azure.ai.project.models.RunStepDeltaDetail + """ + + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() + """The details of the run step.""" + + @overload + def __init__( + self, + *, + step_details: Optional["_models.RunStepDeltaDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaChunk(_model_base.Model): + """Represents a run step delta i.e. any changed fields on a run step during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default + value is "thread.run.step.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the run step. Required. + :vartype delta: ~azure.ai.project.models.RunStepDelta + """ + + id: str = rest_field() + """The identifier of the run step, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step.delta"] = rest_field() + """The object type, which is always ``thread.run.step.delta``. Required. Default value is + \"thread.run.step.delta\".""" + delta: "_models.RunStepDelta" = rest_field() + """The delta containing the fields that have changed on the run step. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.RunStepDelta", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta" + + +class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: disable=name-too-long + """Represents the Code Interpreter tool call data in a streaming run step's tool calls. + + :ivar input: The input into the Code Interpreter tool call. + :vartype input: str + :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one + or more + items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these are + represented by a + different object type. + :vartype outputs: list[~azure.ai.project.models.RunStepDeltaCodeInterpreterOutput] + """ + + input: Optional[str] = rest_field() + """The input into the Code Interpreter tool call.""" + outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() + """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented + by a + different object type.""" + + @overload + def __init__( + self, + *, + input: Optional[str] = None, + outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterOutput(_model_base.Model): + """The abstract base representation of a streaming run step tool call's Code Interpreter tool + output. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput + + + :ivar index: The index of the output in the streaming run step tool call's Code Interpreter + outputs array. Required. + :vartype index: int + :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required. + Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field() + """The index of the output in the streaming run step tool call's Code Interpreter outputs array. + Required.""" + type: str = rest_discriminator(name="type") + """The type of the streaming run step tool call's Code Interpreter output. Required. Default value + is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"): + """Represents an image output as produced the Code interpreter tool and as represented in a + streaming run step's delta tool calls collection. 
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The object type, which is always "image". Required. Default value is "image".
+    :vartype type: str
+    :ivar image: The image data for the Code Interpreter tool call output.
+    :vartype image: ~azure.ai.project.models.RunStepDeltaCodeInterpreterImageOutputObject
+    """
+
+    type: Literal["image"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"image\". Required. Default value is \"image\"."""
+    image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field()
+    """The image data for the Code Interpreter tool call output."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="image", **kwargs)
+
+
+class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the data for a streaming run step's Code Interpreter tool call image output.
+
+    :ivar file_id: The file ID for the image.
+    :vartype file_id: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The file ID for the image."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"):
+    """Represents a log output as produced by the Code Interpreter tool and as represented in a
+    streaming run step's delta tool calls collection.
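+
+    A minimal sketch of accumulating streamed log text (the payload shape shown is
+    an assumption for illustration):
+
+    .. code-block:: python
+
+        # Hypothetical: append incremental interpreter logs as outputs arrive.
+        log_text = ""
+        for output in tool_call.code_interpreter.outputs or []:
+            if output.type == "logs" and output.logs is not None:
+                log_text += output.logs
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The type of the object, which is always "logs". Required. Default value is "logs".
+    :vartype type: str
+    :ivar logs: The text output from the Code Interpreter tool call.
+    :vartype logs: str
+    """
+
+    type: Literal["logs"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the object, which is always \"logs\". Required. Default value is \"logs\"."""
+    logs: Optional[str] = rest_field()
+    """The text output from the Code Interpreter tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        logs: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="logs", **kwargs)
+
+
+class RunStepDeltaToolCall(_model_base.Model):
+    """The abstract base representation of a single tool call within a streaming run step's delta tool
+    call details.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are: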
+    RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall,
+    RunStepDeltaFunctionToolCall
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The type of the tool call detail item in a streaming run step's details. Required.
+     Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the tool call detail in the run step's tool_calls array. Required."""
+    id: str = rest_field()
+    """The ID of the tool call, used when submitting outputs to the run. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the tool call detail item in a streaming run step's details. Required. Default
+    value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"):
+    """Represents a Code Interpreter tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "code_interpreter". Required. Default value is
+     "code_interpreter".
+    :vartype type: str
+    :ivar code_interpreter: The Code Interpreter data for the tool call.
+    :vartype code_interpreter: ~azure.ai.project.models.RunStepDeltaCodeInterpreterDetailItemObject
+    """
+
+    type: Literal["code_interpreter"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"code_interpreter\". Required. Default value is
+    \"code_interpreter\"."""
+    code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field()
+    """The Code Interpreter data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="code_interpreter", **kwargs)
+
+
+class RunStepDeltaDetail(_model_base.Model):
+    """Represents a single run step detail item in a streaming run step's delta payload.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaMessageCreation, RunStepDeltaToolCallObject
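+
+    A minimal dispatch sketch (the ``detail`` object here is an assumed streamed
+    payload, shown for illustration):
+
+    .. code-block:: python
+
+        # Hypothetical: branch on the discriminator of a streamed step detail.
+        if detail.type == "message_creation":
+            print(detail.message_creation.message_id)
+        elif detail.type == "tool_calls":
+            print(len(detail.tool_calls or []))
+
+
+    :ivar type: The object type for the run step detail object. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the run step detail object. Required. Default value is None."""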
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"):
+    """Represents a file search tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "file_search". Required. Default value is
+     "file_search".
+    :vartype type: str
+    :ivar file_search: Reserved for future use.
+    :vartype file_search: dict[str, str]
+    """
+
+    type: Literal["file_search"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"file_search\". Required. Default value is \"file_search\"."""
+    file_search: Optional[Dict[str, str]] = rest_field()
+    """Reserved for future use."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        file_search: Optional[Dict[str, str]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_search", **kwargs)
+
+
+class RunStepDeltaFunction(_model_base.Model):
+    """Represents the function data in a streaming run step delta's function tool call.
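+
+    A minimal sketch of accumulating streamed function-call arguments (the
+    ``delta_function`` name is an assumption for illustration):
+
+    .. code-block:: python
+
+        # Hypothetical: argument fragments arrive incrementally during streaming.
+        arguments = ""
+        if delta_function.arguments is not None:
+            arguments += delta_function.arguments
+
+    :ivar name: The name of the function.
+    :vartype name: str
+    :ivar arguments: The arguments passed to the function as input.
+    :vartype arguments: str
+    :ivar output: The output of the function, null if outputs have not yet been submitted.
+    :vartype output: str
+    """
+
+    name: Optional[str] = rest_field()
+    """The name of the function."""
+    arguments: Optional[str] = rest_field()
+    """The arguments passed to the function as input."""
+    output: Optional[str] = rest_field()
+    """The output of the function, null if outputs have not yet been submitted."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        arguments: Optional[str] = None,
+        output: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"):
+    """Represents a function tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "function". Required. Default value is
+     "function".
+    :vartype type: str
+    :ivar function: The function data for the tool call.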
+    :vartype function: ~azure.ai.project.models.RunStepDeltaFunction
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"function\". Required. Default value is \"function\"."""
+    function: Optional["_models.RunStepDeltaFunction"] = rest_field()
+    """The function data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        function: Optional["_models.RunStepDeltaFunction"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="function", **kwargs)
+
+
+class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"):
+    """Represents a message creation within a streaming run step delta.
+
+
+    :ivar type: The object type, which is always "message_creation". Required. Default value is
+     "message_creation".
+    :vartype type: str
+    :ivar message_creation: The message creation data.
+    :vartype message_creation: ~azure.ai.project.models.RunStepDeltaMessageCreationObject
+    """
+
+    type: Literal["message_creation"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"message_creation\". Required. Default value is
+    \"message_creation\"."""
+    message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field()
+    """The message creation data."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="message_creation", **kwargs)
+
+
+class RunStepDeltaMessageCreationObject(_model_base.Model):
+    """Represents the data within a streaming run step message creation response object.
+
+    :ivar message_id: The ID of the newly-created message.
+    :vartype message_id: str
+    """
+
+    message_id: Optional[str] = rest_field()
+    """The ID of the newly-created message."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_id: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"):
+    """Represents an invocation of tool calls as part of a streaming run step.
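+
+    A minimal sketch of iterating streamed tool calls (the ``detail`` object here
+    is an assumed streamed payload, shown for illustration):
+
+    .. code-block:: python
+
+        # Hypothetical: dispatch each streamed tool call on its discriminator.
+        for tool_call in detail.tool_calls or []:
+            if tool_call.type == "code_interpreter":
+                print(tool_call.code_interpreter)
+
+
+    :ivar type: The object type, which is always "tool_calls". Required. Default value is
+     "tool_calls".
+    :vartype type: str
+    :ivar tool_calls: The collection of tool calls for the tool call detail item.
+    :vartype tool_calls: list[~azure.ai.project.models.RunStepDeltaToolCall]
+    """
+
+    type: Literal["tool_calls"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"tool_calls\". Required.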
Default value is \"tool_calls\".""" + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field() + """The collection of tool calls for the tool call detail item.""" + + @overload + def __init__( + self, + *, + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="tool_calls", **kwargs) + + +class RunStepDetails(_model_base.Model): + """An abstract representation of the details for a run step. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepMessageCreationDetails, RunStepToolCallDetails + + + :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.project.models.RunStepType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepError(_model_base.Model): + """The error information associated with a failed run step. + + + :ivar code: The error code for this error. Required. Known values are: "server_error" and + "rate_limit_exceeded". + :vartype code: str or ~azure.ai.project.models.RunStepErrorCode + :ivar message: The human-readable text associated with this error. Required. + :vartype message: str + """ + + code: Union[str, "_models.RunStepErrorCode"] = rest_field() + """The error code for this error. Required. Known values are: \"server_error\" and + \"rate_limit_exceeded\".""" + message: str = rest_field() + """The human-readable text associated with this error. Required.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.RunStepErrorCode"], + message: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"): + """A record of a call to a file search tool, issued by the model in evaluation of a defined tool, + that represents + executed file search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Reserved for future use. Required. + :vartype file_search: dict[str, str] + """ + + type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Dict[str, str] = rest_field() + """Reserved for future use. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + file_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_search", **kwargs) + + +class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"): + """A record of a call to a function tool, issued by the model in evaluation of a defined tool, + that represents the inputs + and output consumed and emitted by the specified function. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'function'. Required. Default value is "function". + :vartype type: str + :ivar function: The detailed information about the function called by the model. Required. + :vartype function: ~azure.ai.project.models.RunStepFunctionToolCallDetails + """ + + type: Literal["function"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'function'. Required. Default value is \"function\".""" + function: "_models.RunStepFunctionToolCallDetails" = rest_field() + """The detailed information about the function called by the model. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + function: "_models.RunStepFunctionToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="function", **kwargs) + + +class RunStepFunctionToolCallDetails(_model_base.Model): + """The detailed information about the function called by the model. + + + :ivar name: The name of the function. Required. + :vartype name: str + :ivar arguments: The arguments that the model requires are provided to the named function. + Required. + :vartype arguments: str + :ivar output: The output of the function, only populated for function calls that have already + have had their outputs submitted. Required. + :vartype output: str + """ + + name: str = rest_field() + """The name of the function. Required.""" + arguments: str = rest_field() + """The arguments that the model requires are provided to the named function. Required.""" + output: str = rest_field() + """The output of the function, only populated for function calls that have already have had their + outputs submitted. Required.""" + + @overload + def __init__( + self, + *, + name: str, + arguments: str, + output: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"): + """The detailed information associated with a message creation run step. + + + :ivar type: The object type, which is always 'message_creation'. Required. Represents a run + step to create a message. 
+ :vartype type: str or ~azure.ai.project.models.MESSAGE_CREATION + :ivar message_creation: Information about the message creation associated with this run step. + Required. + :vartype message_creation: ~azure.ai.project.models.RunStepMessageCreationReference + """ + + type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'message_creation'. Required. Represents a run step to create + a message.""" + message_creation: "_models.RunStepMessageCreationReference" = rest_field() + """Information about the message creation associated with this run step. Required.""" + + @overload + def __init__( + self, + *, + message_creation: "_models.RunStepMessageCreationReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs) + + +class RunStepMessageCreationReference(_model_base.Model): + """The details of a message created as a part of a run step. + + + :ivar message_id: The ID of the message created by this run step. Required. + :vartype message_id: str + """ + + message_id: str = rest_field() + """The ID of the message created by this run step. Required.""" + + @overload + def __init__( + self, + *, + message_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"): + """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined + tool, that represents + executed Microsoft Fabric operations. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is + "microsoft_fabric". + :vartype type: str + :ivar microsoft_fabric: Reserved for future use. Required. + :vartype microsoft_fabric: dict[str, str] + """ + + type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'microsoft_fabric'. Required. Default value is + \"microsoft_fabric\".""" + microsoft_fabric: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + microsoft_fabric: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="microsoft_fabric", **kwargs) + + +class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): + """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, + that represents + executed SharePoint actions. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'sharepoint'. Required. 
Default value is + "sharepoint". + :vartype type: str + :ivar share_point: Reserved for future use. Required. + :vartype share_point: dict[str, str] + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + share_point: Dict[str, str] = rest_field(name="sharepoint") + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + share_point: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint", **kwargs) + + +class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): + """The detailed information associated with a run step calling tools. + + + :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that + calls tools. + :vartype type: str or ~azure.ai.project.models.TOOL_CALLS + :ivar tool_calls: A list of tool call details for this run step. Required. + :vartype tool_calls: list[~azure.ai.project.models.RunStepToolCall] + """ + + type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'tool_calls'. Required. Represents a run step that calls + tools.""" + tool_calls: List["_models.RunStepToolCall"] = rest_field() + """A list of tool call details for this run step. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RunStepToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) + + +class SamplingStrategy(_model_base.Model): + """SamplingStrategy Definition. + + + :ivar rate: Sampling rate. Required. + :vartype rate: float + """ + + rate: float = rest_field() + """Sampling rate. Required.""" + + @overload + def __init__( + self, + *, + rate: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): + """The input definition information for a sharepoint tool as used to configure an agent. + + + :ivar type: The object type, which is always 'sharepoint'. Required. Default value is + "sharepoint". + :vartype type: str + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint", **kwargs) + + +class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): + """The details for required tool calls that must be submitted for an agent thread run to continue. + + + :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is + "submit_tool_outputs". + :vartype type: str + :ivar submit_tool_outputs: The details describing tools that should be called to submit tool + outputs. Required. + :vartype submit_tool_outputs: ~azure.ai.project.models.SubmitToolOutputsDetails + """ + + type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'submit_tool_outputs'. Required. Default value is + \"submit_tool_outputs\".""" + submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() + """The details describing tools that should be called to submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + submit_tool_outputs: "_models.SubmitToolOutputsDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="submit_tool_outputs", **kwargs) + + +class SubmitToolOutputsDetails(_model_base.Model): + """The details describing tools that should be called to submit tool outputs. + + + :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to + continue. Required. + :vartype tool_calls: list[~azure.ai.project.models.RequiredToolCall] + """ + + tool_calls: List["_models.RequiredToolCall"] = rest_field() + """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RequiredToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SystemData(_model_base.Model): + """Metadata pertaining to creation and last modification of the resource. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar created_at: The timestamp the resource was created at. + :vartype created_at: ~datetime.datetime + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The identity type that created the resource. + :vartype created_by_type: str + :ivar last_modified_at: The timestamp of resource last modification (UTC). 
+ :vartype last_modified_at: ~datetime.datetime + """ + + created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") + """The timestamp the resource was created at.""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The identity that created the resource.""" + created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) + """The identity type that created the resource.""" + last_modified_at: Optional[datetime.datetime] = rest_field( + name="lastModifiedAt", visibility=["read"], format="rfc3339" + ) + """The timestamp of resource last modification (UTC).""" + + +class ThreadDeletionStatus(_model_base.Model): + """The status of a thread deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is + "thread.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["thread.deleted"] = rest_field() + """The object type, which is always 'thread.deleted'. Required. Default value is + \"thread.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.deleted"] = "thread.deleted" + + +class ThreadMessage(_model_base.Model): + """A single, existing message within an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.message'. Required. Default value is + "thread.message". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar thread_id: The ID of the thread that this message belongs to. Required. + :vartype thread_id: str + :ivar status: The status of the message. Required. Known values are: "in_progress", + "incomplete", and "completed". + :vartype status: str or ~azure.ai.project.models.MessageStatus + :ivar incomplete_details: On an incomplete message, details about why the message is + incomplete. Required. + :vartype incomplete_details: ~azure.ai.project.models.MessageIncompleteDetails + :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. + Required. + :vartype completed_at: ~datetime.datetime + :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as + incomplete. Required. + :vartype incomplete_at: ~datetime.datetime + :ivar role: The role associated with the agent thread message. Required. Known values are: + "user" and "assistant". 
+ :vartype role: str or ~azure.ai.project.models.MessageRole + :ivar content: The list of content items associated with the agent thread message. Required. + :vartype content: list[~azure.ai.project.models.MessageContent] + :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. + :vartype assistant_id: str + :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. + Required. + :vartype run_id: str + :ivar attachments: A list of files attached to the message, and the tools they were added to. + Required. + :vartype attachments: list[~azure.ai.project.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message"] = rest_field() + """The object type, which is always 'thread.message'. Required. Default value is + \"thread.message\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + thread_id: str = rest_field() + """The ID of the thread that this message belongs to. Required.""" + status: Union[str, "_models.MessageStatus"] = rest_field() + """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and + \"completed\".""" + incomplete_details: "_models.MessageIncompleteDetails" = rest_field() + """On an incomplete message, details about why the message is incomplete. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was completed. Required.""" + incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" + role: Union[str, "_models.MessageRole"] = rest_field() + """The role associated with the agent thread message. Required. Known values are: \"user\" and + \"assistant\".""" + content: List["_models.MessageContent"] = rest_field() + """The list of content items associated with the agent thread message. Required.""" + assistant_id: str = rest_field() + """If applicable, the ID of the agent that authored this message. Required.""" + run_id: str = rest_field() + """If applicable, the ID of the run associated with the authoring of this message. Required.""" + attachments: List["_models.MessageAttachment"] = rest_field() + """A list of files attached to the message, and the tools they were added to. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + thread_id: str, + status: Union[str, "_models.MessageStatus"], + incomplete_details: "_models.MessageIncompleteDetails", + completed_at: datetime.datetime, + incomplete_at: datetime.datetime, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageContent"], + assistant_id: str, + run_id: str, + attachments: List["_models.MessageAttachment"], + metadata: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message"] = "thread.message" + + +class ThreadMessageOptions(_model_base.Model): + """A single message within an agent thread, as provided during that thread's creation for its + initial state. + + All required parameters must be populated in order to send to server. + + :ivar role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: "user" and "assistant". + :vartype role: str or ~azure.ai.project.models.MessageRole + :ivar content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :vartype content: str + :ivar attachments: A list of files attached to the message, and the tools they should be added + to. + :vartype attachments: list[~azure.ai.project.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The role of the entity that is creating the message. Allowed values include: + + + * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases + to represent user-generated messages. + * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: \"user\" and \"assistant\".""" + content: str = rest_field() + """The textual content of the initial message. Currently, robust input including images and + annotated text may only be provided via + a separate call to the create message API. Required.""" + attachments: Optional[List["_models.MessageAttachment"]] = rest_field() + """A list of files attached to the message, and the tools they should be added to.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: str, + attachments: Optional[List["_models.MessageAttachment"]] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ThreadRun(_model_base.Model): + """Data representing a single evaluation run of an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run'. Required. Default value is + "thread.run". + :vartype object: str + :ivar thread_id: The ID of the thread associated with this run. Required. + :vartype thread_id: str + :ivar assistant_id: The ID of the agent associated with the thread this run was performed + against. Required. + :vartype assistant_id: str + :ivar status: The status of the agent thread run. Required. Known values are: "queued", + "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and + "expired". + :vartype status: str or ~azure.ai.project.models.RunStatus + :ivar required_action: The details of the action required for the agent thread run to continue. + :vartype required_action: ~azure.ai.project.models.RequiredAction + :ivar last_error: The last error, if any, encountered by this agent thread run. Required. + :vartype last_error: ~azure.ai.project.models.RunError + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The overridden system instructions used for this agent thread run. + Required. + :vartype instructions: str + :ivar tools: The overridden enabled tools used for this agent thread run. Required. + :vartype tools: list[~azure.ai.project.models.ToolDefinition] + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. + Required. + :vartype expires_at: ~datetime.datetime + :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. + Required. + :vartype started_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is + not incomplete. Required. Known values are: "max_completion_tokens" and "max_prompt_tokens". + :vartype incomplete_details: str or ~azure.ai.project.models.IncompleteRunDetails + :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not + in a terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). Required. 
+ :vartype usage: ~azure.ai.project.models.RunCompletionUsage
+ :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1.
+ :vartype temperature: float
+ :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1.
+ :vartype top_p: float
+ :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over
+ the course of the run. Required.
+ :vartype max_prompt_tokens: int
+ :ivar max_completion_tokens: The maximum number of completion tokens specified to have been
+ used over the course of the run. Required.
+ :vartype max_completion_tokens: int
+ :ivar truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Required.
+ :vartype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is
+ one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice
+ :vartype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :ivar response_format: The response format of the tool calls used in this run. Required. Is one
+ of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat
+ :vartype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode or
+ ~azure.ai.project.models.AgentsApiResponseFormat
+ :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
+ storing additional information about that object in a structured format. Keys may be up to 64
+ characters in length and values may be up to 512 characters in length. Required.
+ :vartype metadata: dict[str, str]
+ :ivar tool_resources: Override the tools the agent can use for this run. This is useful for
+ modifying the behavior on a per-run basis.
+ :vartype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions
+ :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run.
+ :vartype parallel_tool_calls: bool
+ """
+
+ id: str = rest_field()
+ """The identifier, which can be referenced in API endpoints. Required."""
+ object: Literal["thread.run"] = rest_field()
+ """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\"."""
+ thread_id: str = rest_field()
+ """The ID of the thread associated with this run. Required."""
+ assistant_id: str = rest_field()
+ """The ID of the agent associated with the thread this run was performed against. Required."""
+ status: Union[str, "_models.RunStatus"] = rest_field()
+ """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\",
+ \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\"."""
+ required_action: Optional["_models.RequiredAction"] = rest_field()
+ """The details of the action required for the agent thread run to continue."""
+ last_error: "_models.RunError" = rest_field()
+ """The last error, if any, encountered by this agent thread run. Required."""
+ model: str = rest_field()
+ """The ID of the model to use. Required."""
+ instructions: str = rest_field()
+ """The overridden system instructions used for this agent thread run. Required."""
+ tools: List["_models.ToolDefinition"] = rest_field()
+ """The overridden enabled tools used for this agent thread run. Required."""
+ created_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this object was created. Required."""
+ expires_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this item expires. Required."""
+ started_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this item was started. Required."""
+ completed_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this completed. Required."""
+ cancelled_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this was cancelled. Required."""
+ failed_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp, in seconds, representing when this failed. Required."""
+ incomplete_details: Union[str, "_models.IncompleteRunDetails"] = rest_field()
+ """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.
+ Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\"."""
+ usage: "_models.RunCompletionUsage" = rest_field()
+ """Usage statistics related to the run. This value will be ``null`` if the run is not in a
+ terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required."""
+ temperature: Optional[float] = rest_field()
+ """The sampling temperature used for this run. If not set, defaults to 1."""
+ top_p: Optional[float] = rest_field()
+ """The nucleus sampling value used for this run. If not set, defaults to 1."""
+ max_prompt_tokens: int = rest_field()
+ """The maximum number of prompt tokens specified to have been used over the course of the run.
+ Required."""
+ max_completion_tokens: int = rest_field()
+ """The maximum number of completion tokens specified to have been used over the course of the run.
+ Required."""
+ truncation_strategy: "_models.TruncationObject" = rest_field()
+ """The strategy to use for dropping messages as the context window moves forward. Required."""
+ tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field()
+ """Controls whether or not and which tool is called by the model. Required. Is one of the
+ following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"],
+ AgentsNamedToolChoice"""
+ response_format: "_types.AgentsApiResponseFormatOption" = rest_field()
+ """The response format of the tool calls used in this run. Required. Is one of the following
+ types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat"""
+ metadata: Dict[str, str] = rest_field()
+ """A set of up to 16 key/value pairs that can be attached to an object, used for storing
+ additional information about that object in a structured format. Keys may be up to 64
+ characters in length and values may be up to 512 characters in length. Required."""
+ tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field()
+ """Override the tools the agent can use for this run. This is useful for modifying the behavior on
+ a per-run basis."""
+ parallel_tool_calls: Optional[bool] = rest_field(name="parallelToolCalls")
+ """Determines if tools can be executed in parallel within the run."""
+
+ @overload
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ thread_id: str,
+ assistant_id: str,
+ status: Union[str, "_models.RunStatus"],
+ last_error: "_models.RunError",
+ model: str,
+ instructions: str,
+ tools: List["_models.ToolDefinition"],
+ created_at: datetime.datetime,
+ expires_at: datetime.datetime,
+ started_at: datetime.datetime,
+ completed_at: datetime.datetime,
+ cancelled_at: datetime.datetime,
+ failed_at: datetime.datetime,
+ incomplete_details: Union[str, "_models.IncompleteRunDetails"],
+ usage: "_models.RunCompletionUsage",
+ max_prompt_tokens: int,
+ max_completion_tokens: int,
+ truncation_strategy: "_models.TruncationObject",
+ tool_choice: "_types.AgentsApiToolChoiceOption",
+ response_format: "_types.AgentsApiResponseFormatOption",
+ metadata: Dict[str, str],
+ required_action: Optional["_models.RequiredAction"] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["thread.run"] = "thread.run"
+
+
+class ToolOutput(_model_base.Model):
+ """The data provided during a tool outputs submission to resolve pending tool calls and allow the
+ model to continue.
+
+ :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a
+ required action from a run.
+ :vartype tool_call_id: str
+ :ivar output: The output from the tool to be submitted.
+ :vartype output: str
+ """
+
+ tool_call_id: Optional[str] = rest_field()
+ """The ID of the tool call being resolved, as provided in the tool calls of a required action from
+ a run."""
+ output: Optional[str] = rest_field()
+ """The output from the tool to be submitted."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ tool_call_id: Optional[str] = None,
+ output: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ToolResources(_model_base.Model):
+ """A set of resources that are used by the agent's tools. The resources are specific to the type
+ of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
+ ``file_search`` tool requires a list of vector store IDs.
+
+ :ivar code_interpreter: Resources to be used by the ``code_interpreter`` tool consisting of
+ file IDs.
+ :vartype code_interpreter: ~azure.ai.project.models.CodeInterpreterToolResource
+ :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store
+ IDs.
+ :vartype file_search: ~azure.ai.project.models.FileSearchToolResource
+ :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of
+ connection IDs.
+ :vartype bing_grounding: ~azure.ai.project.models.ConnectionListResource
+ :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of
+ connection IDs.
+ :vartype microsoft_fabric: ~azure.ai.project.models.ConnectionListResource
+ :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection
+ IDs.
+ :vartype share_point: ~azure.ai.project.models.ConnectionListResource
+ :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index
+ IDs and names.
+ :vartype azure_ai_search: ~azure.ai.project.models.AzureAISearchResource
+ """
+
+ code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field()
+ """Resources to be used by the ``code_interpreter`` tool consisting of file IDs."""
+ file_search: Optional["_models.FileSearchToolResource"] = rest_field()
+ """Resources to be used by the ``file_search`` tool consisting of vector store IDs."""
+ bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
+ """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs."""
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
+ """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs."""
+ share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
+ """Resources to be used by the ``sharepoint`` tool consisting of connection IDs."""
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
+ """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None,
+ file_search: Optional["_models.FileSearchToolResource"] = None,
+ bing_grounding: Optional["_models.ConnectionListResource"] = None,
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
+ share_point: Optional["_models.ConnectionListResource"] = None,
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class TruncationObject(_model_base.Model):
+ """Controls for how a thread will be truncated prior to the run. Use this to control the initial
+ context window of the run.
+
+
+ :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to
+ ``last_messages``\\ , the thread will be truncated to the ``lastMessages`` most recent messages
+ in the thread. When set to ``auto``\\ , messages in the middle of the thread will be dropped to
+ fit the context length of the model, ``max_prompt_tokens``. Required. Known values are: "auto"
+ and "last_messages".
+ :vartype type: str or ~azure.ai.project.models.TruncationStrategy
+ :ivar last_messages: The number of most recent messages from the thread to include when
+ constructing the context for the run.
+ :vartype last_messages: int
+ """
+
+ type: Union[str, "_models.TruncationStrategy"] = rest_field()
+ """The truncation strategy to use for the thread. The default is ``auto``. If set to
+ ``last_messages``\ , the thread will be truncated to the ``lastMessages`` most recent messages
+ in the thread. When set to ``auto``\ , messages in the middle of the thread will be dropped to
+ fit the context length of the model, ``max_prompt_tokens``. Required. Known values are:
+ \"auto\" and \"last_messages\"."""
+ last_messages: Optional[int] = rest_field()
+ """The number of most recent messages from the thread to include when constructing the context
+ for the run."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: Union[str, "_models.TruncationStrategy"],
+ last_messages: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class UpdateCodeInterpreterToolResourceOptions(_model_base.Model):
+ """Request object to update ``code_interpreter`` tool resources.
+
+ :ivar file_ids: A list of file IDs to override the current list of the agent.
+ :vartype file_ids: list[str]
+ """
+
+ file_ids: Optional[List[str]] = rest_field()
+ """A list of file IDs to override the current list of the agent."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_ids: Optional[List[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class UpdateFileSearchToolResourceOptions(_model_base.Model):
+ """Request object to update ``file_search`` tool resources.
+
+ :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent.
+ :vartype vector_store_ids: list[str]
+ """
+
+ vector_store_ids: Optional[List[str]] = rest_field()
+ """A list of vector store IDs to override the current list of the agent."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ vector_store_ids: Optional[List[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class UpdateToolResourcesOptions(_model_base.Model):
+ """Request object. A set of resources that are used by the agent's tools. The resources are
+ specific to the type of tool. For example, the ``code_interpreter`` tool requires a list of
+ file IDs, while the ``file_search`` tool requires a list of vector store IDs.
+
+ :ivar code_interpreter: Overrides the list of file IDs made available to the
+ ``code_interpreter`` tool. There can be a maximum of 20 files
+ associated with the tool.
+ :vartype code_interpreter: ~azure.ai.project.models.UpdateCodeInterpreterToolResourceOptions
+ :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of
+ 1 vector store attached to the agent.
+ :vartype file_search: ~azure.ai.project.models.UpdateFileSearchToolResourceOptions
+ :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding``
+ tool consisting of connection IDs.
+ :vartype bing_grounding: ~azure.ai.project.models.ConnectionListResource
+ :ivar microsoft_fabric: Overrides the list of connections to be used by the
+ ``microsoft_fabric`` tool consisting of connection IDs.
+ :vartype microsoft_fabric: ~azure.ai.project.models.ConnectionListResource
+ :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool
+ consisting of connection IDs.
+ :vartype share_point: ~azure.ai.project.models.ConnectionListResource
+ :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool
+ consisting of index IDs and names.
+ :vartype azure_ai_search: ~azure.ai.project.models.AzureAISearchResource
+ """
+
+ code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field()
+ """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a
+ maximum of 20 files associated with the tool."""
+ file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field()
+ """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store
+ attached to the agent."""
+ bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
+ """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of
+ connection IDs."""
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
+ """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of
+ connection IDs."""
+ share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
+ """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of
+ connection IDs."""
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
+ """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and
+ names."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None,
+ file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None,
+ bing_grounding: Optional["_models.ConnectionListResource"] = None,
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
+ share_point: Optional["_models.ConnectionListResource"] = None,
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStore(_model_base.Model):
+ """A vector store is a collection of processed files that can be used by the ``file_search``
+ tool.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The identifier, which can be referenced in API endpoints. Required.
+ :vartype id: str
+ :ivar object: The object type, which is always ``vector_store``. Required. Default value is
+ "vector_store".
+ :vartype object: str
+ :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created.
+ Required.
+ :vartype created_at: ~datetime.datetime
+ :ivar name: The name of the vector store. Required.
+ :vartype name: str
+ :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required.
+ :vartype usage_bytes: int
+ :ivar file_counts: Counts of files processed or being processed by this vector store, grouped
+ by status. Required.
+ :vartype file_counts: ~azure.ai.project.models.VectorStoreFileCount
+ :ivar status: The status of the vector store, which can be either ``expired``\\ ,
+ ``in_progress``\\ , or ``completed``. A status of ``completed`` indicates that the vector store
+ is ready for use. Required. Known values are: "expired", "in_progress", and "completed".
+ :vartype status: str or ~azure.ai.project.models.VectorStoreStatus
+ :ivar expires_after: Details on when this vector store expires.
+ :vartype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire.
+ :vartype expires_at: ~datetime.datetime
+ :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last
+ active. Required.
+ :vartype last_active_at: ~datetime.datetime
+ :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
+ storing additional information about that object in a structured format. Keys may be up to 64
+ characters in length and values may be up to 512 characters in length. Required.
+ :vartype metadata: dict[str, str]
+ """
+
+ id: str = rest_field()
+ """The identifier, which can be referenced in API endpoints. Required."""
+ object: Literal["vector_store"] = rest_field()
+ """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\"."""
+ created_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp (in seconds) for when the vector store was created. Required."""
+ name: str = rest_field()
+ """The name of the vector store. Required."""
+ usage_bytes: int = rest_field()
+ """The total number of bytes used by the files in the vector store. Required."""
+ file_counts: "_models.VectorStoreFileCount" = rest_field()
+ """Counts of files processed or being processed by this vector store, grouped by status.
+ Required."""
+ status: Union[str, "_models.VectorStoreStatus"] = rest_field()
+ """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or
+ ``completed``. A status of ``completed`` indicates that the vector store is ready for use.
+ Required. Known values are: \"expired\", \"in_progress\", and \"completed\"."""
+ expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field()
+ """Details on when this vector store expires."""
+ expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp")
+ """The Unix timestamp (in seconds) for when the vector store will expire."""
+ last_active_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp (in seconds) for when the vector store was last active. Required."""
+ metadata: Dict[str, str] = rest_field()
+ """A set of up to 16 key/value pairs that can be attached to an object, used for storing
+ additional information about that object in a structured format. Keys may be up to 64
+ characters in length and values may be up to 512 characters in length. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ created_at: datetime.datetime,
+ name: str,
+ usage_bytes: int,
+ file_counts: "_models.VectorStoreFileCount",
+ status: Union[str, "_models.VectorStoreStatus"],
+ last_active_at: datetime.datetime,
+ metadata: Dict[str, str],
+ expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None,
+ expires_at: Optional[datetime.datetime] = None,
+ ) -> None: ...
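+ # Hedged usage sketch (editorial illustration, not generated code; the field
+ # values below are hypothetical): like every model in this module, VectorStore
+ # accepts either the keyword overload above or a single raw JSON mapping,
+ # matching the overload that follows, so a service payload deserializes directly:
+ #
+ #     store = VectorStore(
+ #         {
+ #             "id": "vs_abc123",
+ #             "object": "vector_store",
+ #             "created_at": 1699061776,
+ #             "name": "support-docs",
+ #             "usage_bytes": 123456,
+ #             "file_counts": {"in_progress": 0, "completed": 3, "failed": 0, "cancelled": 0, "total": 3},
+ #             "status": "completed",
+ #             "last_active_at": 1699061776,
+ #             "metadata": {},
+ #         }
+ #     )
+ #     assert store.status == "completed"  # attributes are read from the wire data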
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store"] = "vector_store" + + +class VectorStoreChunkingStrategyRequest(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest + + All required parameters must be populated in order to send to server. + + :ivar type: The object type. Required. Known values are: "auto" and "static". + :vartype type: str or ~azure.ai.project.models.VectorStoreChunkingStrategyRequestType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"auto\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): + """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and + chunk_overlap_tokens of 400. + + All required parameters must be populated in order to send to server. + + :ivar type: The object type, which is always 'auto'. Required. + :vartype type: str or ~azure.ai.project.models.AUTO + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'auto'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) + + +class VectorStoreChunkingStrategyResponse(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse + + + :ivar type: The object type. Required. Known values are: "other" and "static". + :vartype type: str or ~azure.ai.project.models.VectorStoreChunkingStrategyResponseType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"other\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"):
+ """This is returned when the chunking strategy is unknown. Typically, this is because the file was
+ indexed before the chunking_strategy concept was introduced in the API.
+
+
+ :ivar type: The object type, which is always 'other'. Required.
+ :vartype type: str or ~azure.ai.project.models.OTHER
+ """
+
+ type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'other'. Required."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs)
+
+
+class VectorStoreDeletionStatus(_model_base.Model):
+ """Response object for deleting a vector store.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The ID of the resource specified for deletion. Required.
+ :vartype id: str
+ :ivar deleted: A value indicating whether deletion was successful. Required.
+ :vartype deleted: bool
+ :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value
+ is "vector_store.deleted".
+ :vartype object: str
+ """
+
+ id: str = rest_field()
+ """The ID of the resource specified for deletion. Required."""
+ deleted: bool = rest_field()
+ """A value indicating whether deletion was successful. Required."""
+ object: Literal["vector_store.deleted"] = rest_field()
+ """The object type, which is always 'vector_store.deleted'. Required. Default value is
+ \"vector_store.deleted\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ deleted: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.deleted"] = "vector_store.deleted"
+
+
+class VectorStoreExpirationPolicy(_model_base.Model):
+ """The expiration policy for a vector store.
+
+
+ :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors:
+ ``last_active_at``. Required. "last_active_at"
+ :vartype anchor: str or ~azure.ai.project.models.VectorStoreExpirationPolicyAnchor
+ :ivar days: The number of days after the anchor time that the vector store will expire.
+ Required.
+ :vartype days: int
+ """
+
+ anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field()
+ """Anchor timestamp after which the expiration policy applies. Supported anchors:
+ ``last_active_at``. Required. \"last_active_at\""""
+ days: int = rest_field()
+ """The number of days after the anchor time that the vector store will expire. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"],
+ days: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreFile(_model_base.Model): + """Description of a file attached to a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file``. Required. Default value + is "vector_store.file". + :vartype object: str + :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from + the original file + size. Required. + :vartype usage_bytes: int + :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store file, which can be either ``in_progress``\\ , + ``completed``\\ , ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the + vector store file is ready for use. Required. Known values are: "in_progress", "completed", + "failed", and "cancelled". + :vartype status: str or ~azure.ai.project.models.VectorStoreFileStatus + :ivar last_error: The last error associated with this vector store file. Will be ``null`` if + there are no errors. Required. + :vartype last_error: ~azure.ai.project.models.VectorStoreFileError + :ivar chunking_strategy: The strategy used to chunk the file. Required. + :vartype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyResponse + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.file"] = rest_field() + """The object type, which is always ``vector_store.file``. Required. Default value is + \"vector_store.file\".""" + usage_bytes: int = rest_field() + """The total vector store usage in bytes. Note that this may be different from the original file + size. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" + vector_store_id: str = rest_field() + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() + """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , + ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file + is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and + \"cancelled\".""" + last_error: "_models.VectorStoreFileError" = rest_field() + """The last error associated with this vector store file. Will be ``null`` if there are no errors. + Required.""" + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() + """The strategy used to chunk the file. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + usage_bytes: int, + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileStatus"], + last_error: "_models.VectorStoreFileError", + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", + ) -> None: ... 
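+ # Hedged usage sketch (editorial illustration; the variable name and polling
+ # logic below are assumed, not part of this generated file): because `status`
+ # is typed as Union[str, VectorStoreFileStatus], callers can compare it against
+ # the plain wire strings documented above:
+ #
+ #     if vs_file.status == "in_progress":
+ #         pass  # not ready yet; poll again later
+ #     elif vs_file.status == "failed":
+ #         raise RuntimeError(vs_file.last_error.message)
+ #     # otherwise the file is "completed" (ready for use) or "cancelled"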
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.file"] = "vector_store.file"
+
+
+class VectorStoreFileBatch(_model_base.Model):
+ """A batch of files attached to a vector store.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The identifier, which can be referenced in API endpoints. Required.
+ :vartype id: str
+ :ivar object: The object type, which is always ``vector_store.files_batch``. Required. Default
+ value is "vector_store.files_batch".
+ :vartype object: str
+ :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was
+ created. Required.
+ :vartype created_at: ~datetime.datetime
+ :ivar vector_store_id: The ID of the vector store that the file is attached to. Required.
+ :vartype vector_store_id: str
+ :ivar status: The status of the vector store files batch, which can be either ``in_progress``\\
+ , ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: "in_progress",
+ "completed", "cancelled", and "failed".
+ :vartype status: str or ~azure.ai.project.models.VectorStoreFileBatchStatus
+ :ivar file_counts: Counts of files processed or being processed by this vector store, grouped
+ by status. Required.
+ :vartype file_counts: ~azure.ai.project.models.VectorStoreFileCount
+ """
+
+ id: str = rest_field()
+ """The identifier, which can be referenced in API endpoints. Required."""
+ object: Literal["vector_store.files_batch"] = rest_field()
+ """The object type, which is always ``vector_store.files_batch``. Required. Default value is
+ \"vector_store.files_batch\"."""
+ created_at: datetime.datetime = rest_field(format="unix-timestamp")
+ """The Unix timestamp (in seconds) for when the vector store files batch was created. Required."""
+ vector_store_id: str = rest_field()
+ """The ID of the vector store that the file is attached to. Required."""
+ status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field()
+ """The status of the vector store files batch, which can be either ``in_progress``\ ,
+ ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\",
+ \"completed\", \"cancelled\", and \"failed\"."""
+ file_counts: "_models.VectorStoreFileCount" = rest_field()
+ """Counts of files processed or being processed by this vector store, grouped by status.
+ Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ created_at: datetime.datetime,
+ vector_store_id: str,
+ status: Union[str, "_models.VectorStoreFileBatchStatus"],
+ file_counts: "_models.VectorStoreFileCount",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch"
+
+
+class VectorStoreFileCount(_model_base.Model):
+ """Counts of files processed or being processed by this vector store grouped by status.
+
+
+ :ivar in_progress: The number of files that are currently being processed. Required.
+ :vartype in_progress: int
+ :ivar completed: The number of files that have been successfully processed. Required.
+ :vartype completed: int
+ :ivar failed: The number of files that have failed to process. Required.
+ :vartype failed: int
+ :ivar cancelled: The number of files that were cancelled. Required.
+ :vartype cancelled: int
+ :ivar total: The total number of files. Required.
+ :vartype total: int
+ """
+
+ in_progress: int = rest_field()
+ """The number of files that are currently being processed. Required."""
+ completed: int = rest_field()
+ """The number of files that have been successfully processed. Required."""
+ failed: int = rest_field()
+ """The number of files that have failed to process. Required."""
+ cancelled: int = rest_field()
+ """The number of files that were cancelled. Required."""
+ total: int = rest_field()
+ """The total number of files. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ in_progress: int,
+ completed: int,
+ failed: int,
+ cancelled: int,
+ total: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreFileDeletionStatus(_model_base.Model):
+ """Response object for deleting a vector store file relationship.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The ID of the resource specified for deletion. Required.
+ :vartype id: str
+ :ivar deleted: A value indicating whether deletion was successful. Required.
+ :vartype deleted: bool
+ :ivar object: The object type, which is always 'vector_store.file.deleted'. Required. Default
+ value is "vector_store.file.deleted".
+ :vartype object: str
+ """
+
+ id: str = rest_field()
+ """The ID of the resource specified for deletion. Required."""
+ deleted: bool = rest_field()
+ """A value indicating whether deletion was successful. Required."""
+ object: Literal["vector_store.file.deleted"] = rest_field()
+ """The object type, which is always 'vector_store.file.deleted'. Required. Default value is
+ \"vector_store.file.deleted\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ deleted: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted"
+
+
+class VectorStoreFileError(_model_base.Model):
+ """Details on the error that may have occurred while processing a file for this vector store.
+
+
+ :ivar code: The error code. Required. Known values are: "internal_error", "file_not_found",
+ "parsing_error", and "unhandled_mime_type".
+ :vartype code: str or ~azure.ai.project.models.VectorStoreFileErrorCode
+ :ivar message: A human-readable description of the error. Required.
+ :vartype message: str
+ """
+
+ code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field()
+ """The error code. Required. Known values are:
+ \"internal_error\", \"file_not_found\", \"parsing_error\", and \"unhandled_mime_type\"."""
+ message: str = rest_field()
+ """A human-readable description of the error. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: Union[str, "_models.VectorStoreFileErrorCode"],
+ message: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyOptions(_model_base.Model):
+ """Options to configure a vector store static chunking strategy.
+
+
+ :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is
+ 800. The minimum value is 100 and the maximum value is 4096. Required.
+ :vartype max_chunk_size_tokens: int
+ :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value
+ is 400.
+ Note that the overlap must not exceed half of max_chunk_size_tokens. Required.
+ :vartype chunk_overlap_tokens: int
+ """
+
+ max_chunk_size_tokens: int = rest_field()
+ """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100
+ and the maximum value is 4096. Required."""
+ chunk_overlap_tokens: int = rest_field()
+ """The number of tokens that overlap between chunks. The default value is 400.
+ Note that the overlap must not exceed half of max_chunk_size_tokens. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ max_chunk_size_tokens: int,
+ chunk_overlap_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"):
+ """A statically configured chunking strategy.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar type: The object type, which is always 'static'. Required.
+ :vartype type: str or ~azure.ai.project.models.STATIC
+ :ivar static: The options for the static chunking strategy. Required.
+ :vartype static: ~azure.ai.project.models.VectorStoreStaticChunkingStrategyOptions
+ """
+
+ type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'static'. Required."""
+ static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field()
+ """The options for the static chunking strategy. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ static: "_models.VectorStoreStaticChunkingStrategyOptions",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyResponse(
+ VectorStoreChunkingStrategyResponse, discriminator="static"
+): # pylint: disable=name-too-long
+ """A statically configured chunking strategy.
+
+
+ :ivar type: The object type, which is always 'static'. Required.
+ :vartype type: str or ~azure.ai.project.models.STATIC + :ivar static: The options for the static chunking strategy. Required. + :vartype static: ~azure.ai.project.models.VectorStoreStaticChunkingStrategyOptions + """ + + type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'static'. Required.""" + static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() + """The options for the static chunking strategy. Required.""" + + @overload + def __init__( + self, + *, + static: "_models.VectorStoreStaticChunkingStrategyOptions", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py new file mode 100644 index 000000000000..35cf92df96bc --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
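+#
+# Hedged illustration (editorial note; the helper name below is hypothetical):
+# the `_patch` re-exports wired up in this module follow the same customization
+# quickstart shown in `models/_patch.py` above. A hand-written addition would be
+# declared in `_patch.py` and listed in its `__all__`, for example:
+#
+#     __all__: List[str] = ["MyOperationsHelper"]
+#
+#     class MyOperationsHelper:
+#         """Hand-authored helper surfaced next to the generated operations."""
+#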
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AgentsOperations # type: ignore +from ._operations import ConnectionsOperations # type: ignore +from ._operations import EvaluationsOperations # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AgentsOperations", + "ConnectionsOperations", + "EvaluationsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py new file mode 100644 index 000000000000..a427d96f11f2 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py @@ -0,0 +1,7396 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import _model_base, models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import FileType, prepare_multipart_form_data + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from .. 
import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_agents_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: 
Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_agents_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_messages_request( + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if run_id is not None: + _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_run_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_runs_request( + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + 
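+ # The list_* builders above (list_agents, list_messages, list_runs) all take the same limit/order/after/before cursor arguments, and each is appended to the query string only when it is not None. A sketch with hypothetical IDs:
+ #
+ #     request = build_agents_list_runs_request(
+ #         "thread_abc",
+ #         limit=20,        # page size; the docstrings note a 1-100 range, default 20
+ #         order="desc",    # ListSortOrder: "asc" or "desc" by created_at
+ #         after="run_123", # cursor: return the page following this object ID
+ #     )
+ #     # -> GET /threads/thread_abc/runs?api-version=...&limit=20&order=desc&after=run_123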
accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long + thread_id: str, run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/cancel" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/runs" + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_step_request(thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + "stepId": _SERIALIZER.url("step_id", step_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_run_steps_request( + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_files_request( + *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if purpose is not None: + _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") + + # 
Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_upload_file_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_content_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}/content" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_stores_request( + *, + limit: Optional[int] = 
None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_modify_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + 
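+ # Path parameters go through _SERIALIZER.url, which validates the value and percent-encodes it before substituting it into the URL template, so an ID containing "/" or "?" cannot escape its path segment. Roughly, with a hypothetical value:
+ #
+ #     _SERIALIZER.url("vector_store_id", "vs 1", "str")  # -> "vs%201"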
"vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_files_request( # pylint: disable=name-too-long + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", 
vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": 
_SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_request( + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if category is not None: + _params["category"] = _SERIALIZER.query("category", category, "str") + if include_all is not None: + _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool") + if target is not None: + _params["target"] = _SERIALIZER.query("target", target, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionName}" + path_format_arguments = { + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionName}/listsecrets" + path_format_arguments = { + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), + 
} + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs:run" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_schedule_request(name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_or_replace_schedule_request( # pylint: disable=name-too-long + name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_schedule_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not 
None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_delete_schedule_request(name: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.AIProjectClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. 
+ :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. 
Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. 
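The keyword-argument path of `create_agent` above builds the request body from the non-None keywords and raises `TypeError` when `model` is omitted. Here is a minimal usage sketch; `agents_client` stands for an instance of this operations class (client construction, including credentials, is outside this hunk and therefore elided), and the model name is a placeholder, not a value mandated by this patch.

```python
# Hedged sketch: `agents_client` and "gpt-4o" are assumptions for illustration.
agent = agents_client.create_agent(
    model="gpt-4o",                                  # required: placeholder deployment name
    name="weather-bot",
    instructions="You answer questions about the weather.",
    temperature=0.2,                                 # lower -> more deterministic replies
)
print(agent.id)                                      # service-assigned agent identifier
```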
+ :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_agents_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: Agent. 
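`list_agents` returns a single page, so callers drive pagination themselves with the `after`/`before` cursors described above. A sketch under the same `agents_client` assumption; the `data`, `has_more`, and `last_id` attributes follow the OpenAI-style pageable list returned here and should be treated as assumptions if the model differs.

```python
# Hedged sketch of cursor-based pagination over agents.
page = agents_client.list_agents(limit=20, order="desc")
while True:
    for agent in page.data:
        print(agent.id, agent.name)
    if not page.has_more:
        break
    # Feed the last id of this page back in as the `after` cursor.
    page = agents_client.list_agents(limit=20, order="desc", after=page.last_id)
```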
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
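Because the returned `Agent` is documented as MutableMapping-compatible, both attribute access and dict-style access should work. A sketch with a placeholder identifier:

```python
# Hedged sketch: "asst_abc123" is a placeholder agent id.
agent = agents_client.get_agent("asst_abc123")
print(agent.name, agent.model)   # attribute access
print(agent["name"])             # dict-style access via the MutableMapping interface
```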
+ :paramtype content_type: str
+ :keyword model: The ID of the model to use. Default value is None.
+ :paramtype model: str
+ :keyword name: The modified name for the agent to use. Default value is None.
+ :paramtype name: str
+ :keyword description: The modified description for the agent to use. Default value is None.
+ :paramtype description: str
+ :keyword instructions: The modified system instructions for the agent to use. Default value
+ is None.
+ :paramtype instructions: str
+ :keyword tools: The modified collection of tools to enable for the agent. Default value is
+ None.
+ :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
+ :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
+ are specific to the type of tool. For example,
+ the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
+ requires a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output more random,
+ while lower values like 0.2 will make it more focused and deterministic. Default value is
+ None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass.
+ So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword response_format: The response format of the tool calls used by this agent. Is one of
+ the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: Agent. The Agent is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.Agent
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def update_agent(
+ self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.Agent:
+ """Modifies an existing agent.
+
+ :param assistant_id: The ID of the agent to modify. Required.
+ :type assistant_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: Agent.
The Agent is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.Agent
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def update_agent(
+ self,
+ assistant_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ model: Optional[str] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ instructions: Optional[str] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ tool_resources: Optional[_models.ToolResources] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.Agent:
+ """Modifies an existing agent.
+
+ :param assistant_id: The ID of the agent to modify. Required.
+ :type assistant_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword model: The ID of the model to use. Default value is None.
+ :paramtype model: str
+ :keyword name: The modified name for the agent to use. Default value is None.
+ :paramtype name: str
+ :keyword description: The modified description for the agent to use. Default value is None.
+ :paramtype description: str
+ :keyword instructions: The modified system instructions for the agent to use. Default value
+ is None.
+ :paramtype instructions: str
+ :keyword tools: The modified collection of tools to enable for the agent. Default value is
+ None.
+ :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
+ :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
+ are specific to the type of tool. For example,
+ the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
+ requires a list of vector store IDs. Default value is None.
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output more random,
+ while lower values like 0.2 will make it more focused and deterministic. Default value is
+ None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass.
+ So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword response_format: The response format of the tool calls used by this agent. Is one of
+ the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: Agent.
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
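Because the keyword form of `update_agent` drops None-valued keywords from the body (see the `if v is not None` filtering above), a call like the following is a partial update that touches only the fields passed. Same assumptions as the earlier sketches:

```python
# Hedged sketch: only `instructions` and `metadata` are sent; other agent
# properties are left untouched by the service.
updated = agents_client.update_agent(
    "asst_abc123",                                   # placeholder agent id
    instructions="Answer in one sentence.",
    metadata={"team": "docs"},
)
```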
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. 
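A deletion sketch; the `id` and `deleted` fields on `AgentDeletionStatus` are assumptions that mirror the OpenAI-style deletion status object.

```python
# Hedged sketch: delete an agent and inspect the returned status.
status = agents_client.delete_agent("asst_abc123")   # placeholder id
if status.deleted:
    print(f"{status.id} removed")
```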
For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AgentThread. 
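`create_thread` accepts initial messages shaped like `ThreadMessageOptions` (role plus content); since the body is serialized to JSON before sending, plain dicts of the same shape should also pass through. A sketch under that assumption:

```python
# Hedged sketch: seed a new thread with one user message.
thread = agents_client.create_thread(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    metadata={"session": "demo"},
)
print(thread.id)
```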
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + _request = build_agents_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. 
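Reading a thread back is a single call. The fields printed below (`created_at`, `metadata`) are assumptions based on the thread model rather than guarantees of this patch:

```python
# Hedged sketch: retrieve thread state by id (placeholder value).
thread = agents_client.get_thread("thread_xyz789")
print(thread.created_at, thread.metadata)
```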
+ :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.project.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
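In the keyword form of `update_thread`, only `tool_resources` and `metadata` reach the request body (matching the body construction above), so all other thread properties are left as they were. A sketch:

```python
# Hedged sketch: tag a thread without touching its messages or tools.
thread = agents_client.update_thread(
    "thread_xyz789",                                 # placeholder thread id
    metadata={"reviewed": "true"},
)
```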
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. 
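Thread deletion mirrors agent deletion; a sketch with a placeholder id, again assuming a `deleted` flag on the status model:

```python
# Hedged sketch: remove a thread and confirm the reported outcome.
status = agents_client.delete_thread("thread_xyz789")
assert status.deleted
```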
Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.project.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. + :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_message( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + role: Union[str, _models.MessageRole] = _Unset, + content: str = _Unset, + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.project.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. 
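For the keyword form documented here, `role` and `content` are required whenever no raw body is supplied; the implementation below raises `TypeError` otherwise. A sketch, with placeholder identifiers:

```python
# Hedged sketch: append a user message to an existing thread.
message = agents_client.create_message(
    "thread_xyz789",
    role="user",
    content="Summarize the previous answer.",
)
```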
+ :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a 
thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: 
ignore + + @distributed_trace + def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. 
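Listing a thread's messages newest-first and then fetching one by id ties `list_messages` and `get_message` together; the attribute names again follow the pageable-list pattern used above and are assumptions:

```python
# Hedged sketch: read back the most recent messages on a thread.
msgs = agents_client.list_messages("thread_xyz789", order="desc", limit=10)
for m in msgs.data:
    print(m.id, m.role)
if msgs.data:
    latest = agents_client.get_message("thread_xyz789", msgs.data[0].id)
```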
+ :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
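+# --- Editor's usage sketch (illustrative only). update_message accepts either a raw
+# JSON/IO body or the `metadata` keyword shown in the docstring; `project_client` is
+# the assumed client from the earlier sketch and the IDs are placeholders.
+updated = project_client.agents.update_message(
+    thread_id="<thread-id>",
+    message_id="<message-id>",
+    metadata={"reviewed": "true"},  # up to 16 key/value pairs, per the docstring limits
+)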
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.project.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+         or ~azure.ai.project.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.project.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_run(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.project.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+         or ~azure.ai.project.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. 
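+# --- Editor's usage sketch (illustrative only). Starts a run with keyword arguments
+# instead of a pre-built JSON body; `project_client` is the assumed client from the
+# earlier sketch, and the agent/thread IDs are placeholders.
+run = project_client.agents.create_run(
+    thread_id="<thread-id>",
+    assistant_id="<agent-id>",  # required when no body is passed
+    temperature=0.2,            # lower values give more deterministic output
+    max_prompt_tokens=4096,     # best-effort cap across turns of the run
+)
+print(run.id, run.status)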
+ :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. 
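+# --- Editor's usage sketch (illustrative only). Pages through runs using the cursor
+# parameters described above; assumes the `project_client` from the earlier sketch,
+# and assumes the page object holds its items in a `data` attribute, OpenAI-style.
+page = project_client.agents.list_runs(thread_id="<thread-id>", limit=20, order="desc")
+for run in page.data:
+    print(run.id, run.status)
+# A subsequent call can pass after="<last-run-id>" to fetch the next page.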
+ :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
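+# --- Editor's usage sketch (illustrative only). Polls get_run until the run leaves
+# its active states; the status strings below follow the OpenAI run lifecycle and are
+# an assumption for this service. Reuses the assumed `project_client`.
+import time
+
+run = project_client.agents.get_run(thread_id="<thread-id>", run_id="<run-id>")
+while run.status in ("queued", "in_progress"):
+    time.sleep(1)  # simple fixed backoff, adequate for a sketch
+    run = project_client.agents.get_run(thread_id="<thread-id>", run_id="<run-id>")
+print("final status:", run.status)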
+ :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
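+# --- Editor's usage sketch (illustrative only). Attaches metadata to an existing run
+# via the `metadata` keyword documented above; `project_client` is the assumed client
+# from the earlier sketch.
+run = project_client.agents.update_run(
+    thread_id="<thread-id>",
+    run_id="<run-id>",
+    metadata={"source": "docs-example"},
+)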
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
+ :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
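+# --- Editor's usage sketch (illustrative only). When a run's status is
+# 'requires_action' with a required_action.type of 'submit_tool_outputs' (see the
+# docstring above), the caller returns tool results like this. The ToolOutput field
+# names (`tool_call_id`, `output`) follow the OpenAI convention and are an assumption.
+from azure.ai.project.models import ToolOutput  # assumed import path
+
+run = project_client.agents.submit_tool_outputs_to_run(
+    thread_id="<thread-id>",
+    run_id="<run-id>",
+    tool_outputs=[ToolOutput(tool_call_id="<call-id>", output='{"result": 42}')],
+)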
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
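+# --- Editor's usage sketch (illustrative only). Cancels an in-progress run; assumes
+# the `project_client` from the earlier sketch and placeholder IDs.
+run = project_client.agents.cancel_run(thread_id="<thread-id>", run_id="<run-id>")
+print(run.status)  # expected to move toward a cancelled terminal state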
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only
+         the number of completion tokens specified, across multiple turns of the run. If the run
+         exceeds the number of completion tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.project.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+         or ~azure.ai.project.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.project.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_thread_and_run(
+        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Creates a new agent thread and immediately starts a run using that new thread.
+
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only
+         the number of completion tokens specified, across multiple turns of the run. If the run
+         exceeds the number of completion tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.project.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+         or ~azure.ai.project.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :return: RunStep. 
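+# --- Editor's usage sketch (illustrative only). Creates a thread and starts a run in
+# one call; omitting `thread` creates an empty thread, per the docstring above. The
+# `project_client` object and the `thread_id` attribute on the returned run are
+# assumptions, following the OpenAI shape.
+run = project_client.agents.create_thread_and_run(
+    assistant_id="<agent-id>",
+    instructions="Answer using the attached files only.",  # overrides agent instructions
+)
+print(run.thread_id, run.status)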
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.project.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
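+# --- Editor's usage sketch (illustrative only). Retrieves one step of a run; assumes
+# the `project_client` from the earlier sketch and placeholder IDs.
+step = project_client.agents.get_run_step(
+    thread_id="<thread-id>", run_id="<run-id>", step_id="<step-id>"
+)
+print(step.id, step.type)  # `type` distinguishing message creation vs. tool calls is assumed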
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_agents_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :return: FileListResponse. 
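+ Each entry in the list describes one uploaded file, including its id and purpose.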
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
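+ The returned file carries the server-assigned id that later operations reference.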
The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def upload_file(
+ self,
+ body: JSON = _Unset,
+ *,
+ file: FileType = _Unset,
+ purpose: Union[str, _models.FilePurpose] = _Unset,
+ filename: Optional[str] = None,
+ **kwargs: Any
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param body: Is one of the following types: JSON. Required.
+ :type body: JSON
+ :keyword file: The file data, in bytes. Required.
+ :paramtype file: ~azure.ai.project._vendor.FileType
+ :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and
+ Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and
+ ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results",
+ "assistants", "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
+ :keyword filename: The name of the file. Default value is None.
+ :paramtype filename: str
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+ if body is _Unset:
+ if file is _Unset:
+ raise TypeError("missing required argument: file")
+ if purpose is _Unset:
+ raise TypeError("missing required argument: purpose")
+ body = {"file": file, "filename": filename, "purpose": purpose}
+ body = {k: v for k, v in body.items() if v is not None}
+ _body = body.as_dict() if isinstance(body, _model_base.Model) else body
+ _file_fields: List[str] = ["file"]
+ _data_fields: List[str] = ["purpose", "filename"]
+ _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields)
+
+ _request = build_agents_upload_file_request(
+ api_version=self._config.api_version,
+ files=_files,
+ data=_data,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ response.read()  # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
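+ # Deserialize the JSON response body into the strongly typed OpenAIFile model.
+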
deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_file_request(
+ file_id=file_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ response.read()  # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.OpenAIFile, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})  # type: ignore
+
+ return deserialized  # type: ignore
+
+ @distributed_trace
+ def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
+ """Retrieves the raw content of a previously uploaded file.
+
+ :param file_id: The ID of the file to retrieve. Required.
+ :type file_id: str
+ :return: FileContentResponse.
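+ The response carries the raw content of the stored file.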
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. 
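+ Pass the id of the last store in a page as ``after`` to fetch the next page.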
The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. 
Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_vector_store(
+ self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_vector_store(
+ self,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_vector_store_request(
+ vector_store_id=vector_store_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ response.read()  # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.VectorStore, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})  # type: ignore
+
+ return deserialized  # type: ignore
+
+ @overload
+ def modify_vector_store(
+ self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def modify_vector_store(
+ self,
+ vector_store_id: str,
+ *,
+ content_type: str = "application/json",
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def modify_vector_store(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def modify_vector_store(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore.
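+ Fields omitted from the request are left unchanged on the vector store.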
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
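+ The status reports the id of the vector store and whether the deletion succeeded.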
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_vector_store_file(
+ self,
+ vector_store_id: str,
+ *,
+ file_id: str,
+ content_type: str = "application/json",
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword file_id: Identifier of the file. Required.
+ :paramtype file_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_vector_store_file(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_vector_store_file(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_id: str = _Unset,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFile:
+ """Create a vector store file by attaching a file to a vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_id: Identifier of the file. Required.
+ :paramtype file_id: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFile.
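+ The file to attach must already have been uploaded, for example with ``upload_file``.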
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_vector_store_file_batch(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def create_vector_store_file_batch(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ file_ids: List[str] = _Unset,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ **kwargs: Any
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: List of file identifiers. Required.
+ :paramtype file_ids: list[str]
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :return: VectorStoreFileBatch.
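+ The batch is processed asynchronously; its files move through the ``in_progress``, ``completed``, ``failed``, or ``cancelled`` states.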
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
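A hedged usage sketch, not part of the generated patch: assuming an authenticated AIProjectClient (constructor signature taken from the _patch.py later in this diff) whose agents attribute exposes these operations, and angle-bracket placeholders standing in for real identifiers, the keyword overload above could be exercised like this:

    from azure.ai.project import AIProjectClient
    from azure.identity import DefaultAzureCredential

    project_client = AIProjectClient(
        endpoint="<endpoint>",  # placeholder values, not real resources
        subscription_id="<subscription-id>",
        resource_group_name="<resource-group>",
        project_name="<project-name>",
        credential=DefaultAzureCredential(),
    )
    # chunking_strategy is omitted, so the service falls back to its auto strategy.
    batch = project_client.agents.create_vector_store_file_batch(
        vector_store_id="<vector-store-id>",
        file_ids=["<file-id-1>", "<file-id-2>"],
    )
    print(batch.id, batch.status)  # field names assumed from the OpenAI-compatible schema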
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
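A hedged polling sketch built on get_vector_store_file_batch and the cancellation operation declared above; the "in_progress" status literal is an assumption that mirrors the VectorStoreFileStatusFilter values documented further below:

    import time

    deadline = time.time() + 300  # give the batch five minutes to finish
    batch = project_client.agents.get_vector_store_file_batch(
        vector_store_id="<vector-store-id>", batch_id="<batch-id>"
    )
    while batch.status == "in_progress":
        if time.time() > deadline:
            # Best-effort cancellation, as the docstring above describes.
            batch = project_client.agents.cancel_vector_store_file_batch(
                vector_store_id="<vector-store-id>", batch_id="<batch-id>"
            )
            break
        time.sleep(5)
        batch = project_client.agents.get_vector_store_file_batch(
            vector_store_id="<vector-store-id>", batch_id="<batch-id>"
        )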
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.project.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.AIProjectClient`'s + :attr:`connections` attribute. 
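Before the connections operations, a hedged sketch of driving the batch-file listing above with its after cursor; the page shape (data, has_more, last_id) follows the OpenAI-style pageable list and is an assumption, not something this patch confirms:

    after_cursor = None
    while True:
        page = project_client.agents.list_vector_store_file_batch_files(
            vector_store_id="<vector-store-id>",
            batch_id="<batch-id>",
            limit=100,
            order="asc",
            after=after_cursor,
        )
        for item in page.data:  # assumed field on OpenAIPageableListOfVectorStoreFile
            print(item.id)
        if not page.has_more:  # assumed field
            break
        after_cursor = page.last_id  # assumed field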
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def _list( + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.project.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str + :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _get(self, connection_name: str, **kwargs: Any) -> _models._models.ConnectionsListSecretsResponse: + 
"""Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def _list_secrets( + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + def _list_secrets( + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + def _list_secrets( + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + + @distributed_trace + def _list_secrets( + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credentials (if available). + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. 
+ :paramtype ignored: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + if body is _Unset: + if ignored is _Unset: + raise TypeError("missing required argument: ignored") + body = {"ignored": ignored} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_connections_list_secrets_request( + connection_name=connection_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class EvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.project.AIProjectClient`'s + :attr:`evaluations` attribute. 
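Two recurring details in this module are worth a hedged side note. First, every operation merges a caller-supplied error_map over the 401/404/409/304 defaults, so an individual call can map extra status codes to typed exceptions; the error class below is hypothetical, not part of azure-core:

    from azure.core.exceptions import HttpResponseError

    class QuotaExceededError(HttpResponseError):
        """Hypothetical application-specific error type."""

    try:
        batch = project_client.agents.get_vector_store_file_batch(
            vector_store_id="<vector-store-id>",
            batch_id="<batch-id>",
            error_map={429: QuotaExceededError},  # merged via kwargs.pop("error_map", ...)
        )
    except QuotaExceededError:
        print("throttled by the service")

Second, on the JSON-body paths, keys whose value is None are stripped before encoding, so unset keyword arguments never reach the wire. A standalone sketch of that step (plain json here; the generated code routes through SdkJSONEncoder, which additionally skips read-only fields):

    import json

    body = {"chunking_strategy": None, "file_ids": ["<file-id-1>"]}
    body = {k: v for k, v in body.items() if v is not None}  # same filter as above
    print(json.dumps(body))  # -> {"file_ids": ["<file-id-1>"]}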
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: ~azure.ai.project.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + """Run the evaluation. 
+ + :param evaluation: Evaluation to run. Required. + :type evaluation: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type evaluation: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(evaluation, (IOBase, bytes)): + _content = evaluation + else: + _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + 
deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.Evaluation"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.project.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @overload + def update( + self, + id: str, + resource: _models.Evaluation, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.project.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update( + self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type resource: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. 
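list above returns an azure.core.paging.ItemPaged that follows nextLink lazily; a hedged iteration sketch (the id field on Evaluation is assumed, not confirmed here):

    # Flat iteration; subsequent pages are fetched transparently.
    for evaluation in project_client.evaluations.list(top=50):
        print(evaluation.id)

    # Or walk page by page via the azure-core pager protocol.
    for page in project_client.evaluations.list(top=50).by_page():
        print(sum(1 for _ in page))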
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_update_request( + id=id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: EvaluationSchedule. 
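Because update above defaults to application/merge-patch+json, only the keys present in the payload change on the service side; a hedged sketch (the displayName field is illustrative, not taken from this patch):

    updated = project_client.evaluations.update(
        "<evaluation-id>",
        {"displayName": "nightly-eval"},  # partial document; omitted fields stay untouched
    )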
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_replace_schedule( + self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.project.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_replace_schedule( + self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_replace_schedule( + self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_replace_schedule( + self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.project.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.project.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_or_replace_schedule_request( + name=name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedule( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.EvaluationSchedule"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.project.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", 
self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @distributed_trace + def delete_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Resource delete operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_evaluations_delete_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if cls: + return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py new file mode 100644 index 000000000000..f7dd32510333 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/py.typed b/sdk/ai/azure-ai-project/azure/ai/project/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/ai/azure-ai-project/azure/ai/project/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/ai/azure-ai-project/dev_requirements.txt b/sdk/ai/azure-ai-project/dev_requirements.txt new file mode 100644 index 000000000000..c82827bb56f4 --- /dev/null +++ b/sdk/ai/azure-ai-project/dev_requirements.txt @@ -0,0 +1,4 @@ +-e ../../../tools/azure-sdk-tools +../../core/azure-core +../../identity/azure-identity +aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-project/setup.py b/sdk/ai/azure-ai-project/setup.py new file mode 100644 index 000000000000..6985effa6f6a --- /dev/null +++ b/sdk/ai/azure-ai-project/setup.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# coding: utf-8 + +import os +import re +from setuptools import setup, find_packages + + +PACKAGE_NAME = "azure-ai-project" +PACKAGE_PPRINT_NAME = "Azure AI Project" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace("-", "/") + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError("Cannot find version information") + + +setup( + name=PACKAGE_NAME, + version=version, + description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), + long_description=open("README.md", "r").read(), + long_description_content_type="text/markdown", + license="MIT License", + author="Microsoft Corporation", + author_email="azpysdkhelp@microsoft.com", + url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", + keywords="azure, azure sdk", + classifiers=[ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + zip_safe=False, + packages=find_packages( + exclude=[ + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.ai", + ] + ), + include_package_data=True, + package_data={ + "azure.ai.project": ["py.typed"], + }, + install_requires=[ + "isodate>=0.6.1", +
"azure-core>=1.30.0", + "typing-extensions>=4.6.0", + ], + python_requires=">=3.8", +) diff --git a/sdk/ai/azure-ai-project/tsp-location.yaml b/sdk/ai/azure-ai-project/tsp-location.yaml new file mode 100644 index 000000000000..588cfb7e08b3 --- /dev/null +++ b/sdk/ai/azure-ai-project/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/Azure.AI.Project +commit: 271f7fac192ac0d0c94a531b98aa9d94b7816d4c +repo: Azure/azure-rest-api-specs +additionalDirectories: From 6b38bd9325824250db291a32a954b19bdb8a7a2d Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 22 Oct 2024 13:10:57 -0700 Subject: [PATCH 044/138] Copy _patch.py, samples, test, update names --- .../azure/ai/project/_patch.py | 230 +- .../azure/ai/project/aio/_patch.py | 184 +- .../azure/ai/project/aio/operations/_patch.py | 1961 +++++++++++++++- .../azure/ai/project/models/_patch.py | 982 +++++++- .../azure/ai/project/operations/_patch.py | 1966 ++++++++++++++++- .../sample_agents_basics_async.py | 76 + .../sample_agents_functions_async.py | 117 + ...sample_agents_stream_eventhandler_async.py | 96 + ..._stream_eventhandler_with_toolset_async.py | 111 + .../sample_agents_stream_iteration_async.py | 92 + ...ts_vector_store_batch_file_search_async.py | 94 + ...gents_with_file_search_attachment_async.py | 83 + .../async_samples/user_async_functions.py | 29 + .../samples/agents/product_info_1.md | 51 + .../samples/agents/sample_agents_basics.py | 63 + ...mple_agents_code_interpreter_attachment.py | 80 + .../agents/sample_agents_file_search.py | 87 + .../samples/agents/sample_agents_functions.py | 105 + .../agents/sample_agents_run_with_toolset.py | 80 + .../sample_agents_stream_eventhandler.py | 98 + ...ents_stream_eventhandler_with_functions.py | 132 ++ ...agents_stream_eventhandler_with_toolset.py | 109 + .../agents/sample_agents_stream_iteration.py | 92 + ...le_agents_stream_iteration_with_toolset.py | 122 + ...e_agents_vector_store_batch_file_search.py | 88 + ...mple_agents_with_file_search_attachment.py | 75 + .../samples/agents/user_functions.py | 65 + .../async_samples/sample_connections_async.py | 139 ++ .../samples/connections/sample_connections.py | 120 + .../evaluations/evaluate_test_data.jsonl | 3 + .../samples/evaluations/sample_evaluations.py | 88 + .../sample_evaluations_schedules.py | 69 + .../sample_get_azure_openai_client_async.py | 57 + ...ample_get_chat_completions_client_async.py | 49 + .../sample_get_embeddings_client_async.py | 54 + .../sample_get_azure_openai_client.py | 45 + .../sample_get_chat_completions_client.py | 38 + .../inference/sample_get_embeddings_client.py | 42 + sdk/ai/azure-ai-project/setup.py | 2 +- sdk/ai/azure-ai-project/tests/README.md | 79 + .../tests/agents/test_agents_client.py | 1119 ++++++++++ sdk/ai/azure-ai-project/tests/conftest.py | 20 + .../tests/endpoints/unit_tests.py | 198 ++ 43 files changed, 9379 insertions(+), 11 deletions(-) create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py create mode 100644 
sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/product_info_1.md create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py create mode 100644 sdk/ai/azure-ai-project/samples/agents/user_functions.py create mode 100644 sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py create mode 100644 sdk/ai/azure-ai-project/samples/connections/sample_connections.py create mode 100644 sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl create mode 100644 sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py create mode 100644 sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py create mode 100644 sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py create mode 100644 sdk/ai/azure-ai-project/tests/README.md create mode 100644 sdk/ai/azure-ai-project/tests/agents/test_agents_client.py create mode 100644 sdk/ai/azure-ai-project/tests/conftest.py create mode 100644 sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/_patch.py index f7dd32510333..53c3c5b6697b 100644 --- a/sdk/ai/azure-ai-project/azure/ai/project/_patch.py +++ b/sdk/ai/azure-ai-project/azure/ai/project/_patch.py @@ -6,9 +6,235 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +import uuid +from os import PathLike +from pathlib import Path +from typing import List, Any, Union, Dict +from typing_extensions 
import Self +from azure.core.credentials import TokenCredential +from azure.core import PipelineClient +from azure.core.pipeline import policies +from ._configuration import AIProjectClientConfiguration +from ._serialization import Deserializer, Serializer +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from ._client import AIProjectClient as ClientGenerated +from .operations._patch import InferenceOperations -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +class AIProjectClient(ClientGenerated): + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "TokenCredential", + **kwargs: Any, + ) -> None: + # TODO: Validate input formats with regex match (e.g. subscription ID) + if not endpoint: + raise ValueError("endpoint is required") + if not subscription_id: + raise ValueError("subscription_id is required") + if not resource_group_name: + raise ValueError("resource_group_name is required") + if not project_name: + raise ValueError("project_name is required") + if not credential: + raise ValueError("Credential is required") + if "api_version" in kwargs: + raise ValueError("No support for overriding the API version") + if "credential_scopes" in kwargs: + raise ValueError("No support for overriding the credential scopes") + + kwargs1 = kwargs.copy() + kwargs2 = kwargs.copy() + kwargs3 = kwargs.copy() + + # For Endpoints operations (enumerating connections, getting SAS tokens) + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config1 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", + credential_scopes=["https://management.azure.com"], + **kwargs1, + ) + _policies1 = kwargs1.pop("policies", None) + if _policies1 is None: + _policies1 = [ + policies.RequestIdPolicy(**kwargs1), + self._config1.headers_policy, + self._config1.user_agent_policy, + self._config1.proxy_policy, + policies.ContentDecodePolicy(**kwargs1), + self._config1.redirect_policy, + self._config1.retry_policy, + self._config1.authentication_policy, + self._config1.custom_hook_policy, + self._config1.logging_policy, + policies.DistributedTracingPolicy(**kwargs1), + policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, + self._config1.http_logging_policy, + ] + self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + + # For Agents operations + _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config2 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes=["https://ml.azure.com"], + **kwargs2, + ) + _policies2 = kwargs2.pop("policies", None) + if _policies2 is None: + _policies2 = [ + policies.RequestIdPolicy(**kwargs2), + self._config2.headers_policy, + self._config2.user_agent_policy,
self._config2.proxy_policy, + policies.ContentDecodePolicy(**kwargs2), + self._config2.redirect_policy, + self._config2.retry_policy, + self._config2.authentication_policy, + self._config2.custom_hook_policy, + self._config2.logging_policy, + policies.DistributedTracingPolicy(**kwargs2), + policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, + self._config2.http_logging_policy, + ] + self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + + # For Cloud Evaluations operations + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config3 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready + **kwargs3, + ) + _policies3 = kwargs3.pop("policies", None) + if _policies3 is None: + _policies3 = [ + policies.RequestIdPolicy(**kwargs3), + self._config3.headers_policy, + self._config3.user_agent_policy, + self._config3.proxy_policy, + policies.ContentDecodePolicy(**kwargs3), + self._config3.redirect_policy, + self._config3.retry_policy, + self._config3.authentication_policy, + self._config3.custom_hook_policy, + self._config3.logging_policy, + policies.DistributedTracingPolicy(**kwargs3), + policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, + self._config3.http_logging_policy, + ] + self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) + self.inference = InferenceOperations(self) + + def close(self) -> None: + self._client1.close() + self._client2.close() + self._client3.close() + + def __enter__(self) -> Self: + self._client1.__enter__() + self._client2.__enter__() + self._client3.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client1.__exit__(*exc_details) + self._client2.__exit__(*exc_details) + self._client3.__exit__(*exc_details) + + @classmethod + def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AIProjectClient": + """ + Create an AIProjectClient from a connection string. + + :param conn_str: The connection string, copied from your AI Studio project. + """ + if not conn_str: + raise ValueError("Connection string is required") + parts = conn_str.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) + + def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: + """Upload a file to the Azure AI Studio project. 
+ This method requires *azure-ai-ml* to be installed. + + :param file_path: The path to the file to upload. + :type file_path: Union[str, Path, PathLike] + :return: The asset ID of the uploaded file. + :rtype: str + """ + try: + from azure.ai.ml import MLClient + from azure.ai.ml.entities import Data + from azure.ai.ml.constants import AssetTypes + except ImportError: + raise ImportError( + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") + + data = Data( + path=file_path, + type=AssetTypes.URI_FILE, + name=str(uuid.uuid4()), # generating random name + is_anonymous=True, + version="1", + ) + + ml_client = MLClient( + self._config3.credential, + self._config3.subscription_id, + self._config3.resource_group_name, + self._config3.project_name, + ) + + data_asset = ml_client.data.create_or_update(data) + + return data_asset.id + + @property + def scope(self) -> Dict[str, str]: + return { + "subscription_id": self._config3.subscription_id, + "resource_group_name": self._config3.resource_group_name, + "project_name": self._config3.project_name, + } + +__all__: List[str] = [ + "AIProjectClient", +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py index f7dd32510333..d1a7e6d84569 100644 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py @@ -6,9 +6,190 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +from typing import List, Any +from azure.core import AsyncPipelineClient +from azure.core.credentials_async import AsyncTokenCredential +from azure.core.pipeline import policies +from typing_extensions import Self -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +from .._serialization import Deserializer, Serializer +from ._configuration import AIProjectClientConfiguration +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from ._client import AIProjectClient as ClientGenerated +from .operations._patch import InferenceOperations + + +class AIProjectClient(ClientGenerated): + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any, + ) -> None: + # TODO: Validate input formats with regex match (e.g.
subscription ID) + if not endpoint: + raise ValueError("endpoint is required") + if not subscription_id: + raise ValueError("subscription_id is required") + if not resource_group_name: + raise ValueError("resource_group_name is required") + if not project_name: + raise ValueError("project_name is required") + if not credential: + raise ValueError("Credential is required") + if "api_version" in kwargs: + raise ValueError("No support for overriding the API version") + if "credential_scopes" in kwargs: + raise ValueError("No support for overriding the credential scopes") + + kwargs1 = kwargs.copy() + kwargs2 = kwargs.copy() + kwargs3 = kwargs.copy() + + # For Endpoints operations (enumerating connections, getting SAS tokens) + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config1 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", + credential_scopes=["https://management.azure.com"], + **kwargs1, + ) + _policies1 = kwargs1.pop("policies", None) + if _policies1 is None: + _policies1 = [ + policies.RequestIdPolicy(**kwargs1), + self._config1.headers_policy, + self._config1.user_agent_policy, + self._config1.proxy_policy, + policies.ContentDecodePolicy(**kwargs1), + self._config1.redirect_policy, + self._config1.retry_policy, + self._config1.authentication_policy, + self._config1.custom_hook_policy, + self._config1.logging_policy, + policies.DistributedTracingPolicy(**kwargs1), + policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, + self._config1.http_logging_policy, + ] + self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + + # For Agents operations + _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config2 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes=["https://ml.azure.com"], + **kwargs2, + ) + _policies2 = kwargs2.pop("policies", None) + if _policies2 is None: + _policies2 = [ + policies.RequestIdPolicy(**kwargs2), + self._config2.headers_policy, + self._config2.user_agent_policy, + self._config2.proxy_policy, + policies.ContentDecodePolicy(**kwargs2), + self._config2.redirect_policy, + self._config2.retry_policy, + self._config2.authentication_policy, + self._config2.custom_hook_policy, + self._config2.logging_policy, + policies.DistributedTracingPolicy(**kwargs2), + policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, + self._config2.http_logging_policy, + ] + self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + + # For Cloud Evaluations operations + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config3 = AIProjectClientConfiguration(
endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes=["https://management.azure.com"], # TODO: Update once service changes are ready + **kwargs3, + ) + _policies3 = kwargs3.pop("policies", None) + if _policies3 is None: + _policies3 = [ + policies.RequestIdPolicy(**kwargs3), + self._config3.headers_policy, + self._config3.user_agent_policy, + self._config3.proxy_policy, + policies.ContentDecodePolicy(**kwargs3), + self._config3.redirect_policy, + self._config3.retry_policy, + self._config3.authentication_policy, + self._config3.custom_hook_policy, + self._config3.logging_policy, + policies.DistributedTracingPolicy(**kwargs3), + policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, + self._config3.http_logging_policy, + ] + self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) + self.inference = InferenceOperations(self) + + async def close(self) -> None: + await self._client1.close() + await self._client2.close() + await self._client3.close() + + async def __aenter__(self) -> Self: + await self._client1.__aenter__() + await self._client2.__aenter__() + await self._client3.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client1.__aexit__(*exc_details) + await self._client2.__aexit__(*exc_details) + await self._client3.__aexit__(*exc_details) + + @classmethod + def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AIProjectClient": + """ + Create an asynchronous AIProjectClient from a connection string. + + :param conn_str: The connection string, copied from your AI Studio project. + """ + if not conn_str: + raise ValueError("Connection string is required") + parts = conn_str.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) + + +__all__: List[str] = [ + "AIProjectClient", +] # Add all objects you want publicly available to users at this package level def patch_sdk(): diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py index f7dd32510333..51d92bfd1ee9 100644 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py @@ -1,3 +1,4 @@ +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License.
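The hunk below adds an InferenceOperations helper class, which the aio/_patch.py changes above expose as `project_client.inference`. A minimal async usage sketch, assuming the packages azure-identity, azure-ai-inference and aiohttp are installed; the environment variable name and the question text are illustrative, not part of this patch:

import asyncio
import os

from azure.ai.inference.models import UserMessage
from azure.ai.project.aio import AIProjectClient
from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    # Per from_connection_string above, the connection string has four
    # semicolon-separated parts: "<HostName>;<SubscriptionId>;<ResourceGroupName>;<ProjectName>".
    conn_str = os.environ["PROJECT_CONNECTION_STRING"]  # illustrative variable name
    async with DefaultAzureCredential() as credential:
        async with AIProjectClient.from_connection_string(conn_str, credential) as project_client:
            # Resolves the default Serverless connection and returns an authenticated
            # azure.ai.inference.aio.ChatCompletionsClient (see the hunk below).
            chat_client = await project_client.inference.get_chat_completions_client()
            response = await chat_client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
            print(response.choices[0].message.content)


asyncio.run(main())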
@@ -6,9 +7,1965 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List +from ..._vendor import FileType +import asyncio +import io +import logging +import os +import time +from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level +from azure.ai.project import _types +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated +from ._operations import AgentsOperations as AgentsOperationsGenerated +from ...models._patch import ConnectionProperties +from ...models._enums import AuthenticationType, ConnectionType, FilePurpose +from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from ... import models as _models +from azure.core.tracing.decorator_async import distributed_trace_async + +logger = logging.getLogger(__name__) + +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() + + +class InferenceOperations: + + def __init__(self, outer_instance): + self.outer_instance = outer_instance + + @distributed_trace_async + async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": + """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default + Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + + :return: An authenticated chat completions client + :rtype: ~azure.ai.inference.aio.ChatCompletionsClient + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + ) + if not connection: + raise ValueError("No serverless connection found") + + try: + from azure.ai.inference.aio import ChatCompletionsClient + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" + ) + + if connection.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" + ) + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) + ) + elif connection.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" + ) + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=connection.properties.token_credential + ) + elif connection.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27.
+ logger.debug( + "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" + ) + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) + else: + raise ValueError("Unknown authentication type") + + return client + + @distributed_trace_async + async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": + """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default + Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment. + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + + :return: An authenticated embeddings client + :rtype: ~azure.ai.inference.aio.EmbeddingsClient + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + ) + if not connection: + raise ValueError("No serverless connection found") + + try: + from azure.ai.inference.aio import EmbeddingsClient + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" + ) + + if connection.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" + ) + from azure.core.credentials import AzureKeyCredential + + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + elif connection.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" + ) + client = EmbeddingsClient( + endpoint=connection.endpoint_url, credential=connection.properties.token_credential + ) + elif connection.authentication_type == AuthenticationType.SAS: + # TODO - Not yet supported by the service. Expected 9/27. + logger.debug( + "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" + ) + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) + else: + raise ValueError("Unknown authentication type") + + return client + + @distributed_trace_async + async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI": + """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default + Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + + :return: An authenticated AsyncAzureOpenAI client + :rtype: ~openai.AsyncAzureOpenAI + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connection = await self.outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs + ) + if not connection: + raise ValueError("No Azure OpenAI connection found.") + + try: + from openai import AsyncAzureOpenAI + except ModuleNotFoundError as _: + raise ModuleNotFoundError("OpenAI SDK is not installed.
Please install it using 'pip install openai'") + + # Pick latest GA version from the "Data plane - Inference" row in the table + # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + AZURE_OPENAI_API_VERSION = "2024-06-01" + + if connection.authentication_type == AuthenticationType.API_KEY: + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" + ) + client = AsyncAzureOpenAI( + api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION + ) + elif connection.authentication_type == AuthenticationType.AAD: + logger.debug( + "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" + ) + try: + from azure.identity import get_bearer_token_provider + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "azure-identity package not installed. Please install it using 'pip install azure-identity'" + ) + client = AsyncAzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + connection.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=connection.endpoint_url, + api_version=AZURE_OPENAI_API_VERSION, + ) + elif connection.authentication_type == AuthenticationType.SAS: + logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") + try: + from azure.identity import get_bearer_token_provider + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "azure-identity package not installed. Please install it using 'pip install azure-identity'" + ) + client = AsyncAzureOpenAI( + azure_ad_token_provider=get_bearer_token_provider( + connection.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=connection.endpoint_url, + api_version=AZURE_OPENAI_API_VERSION, + ) + else: + raise ValueError("Unknown authentication type") + + return client + + +class ConnectionsOperations(ConnectionsOperationsGenerated): + + @distributed_trace_async + async def get_default( + self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any + ) -> Optional[ConnectionProperties]: + """Get the properties of the default connection of a certain connection type, with or without + populating authentication credentials. + + :param connection_type: The connection type. Required. + :type connection_type: ~azure.ai.project.models._models.ConnectionType + :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+ :type with_credentials: bool + :return: The connection properties, or None if no connection of the given type exists + :rtype: ~azure.ai.project.models._models.ConnectionProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + if not connection_type: + raise ValueError("You must specify a connection type") + # Since there is no notion of default connection at the moment, list all connections in the category + # and return the first one + connection_properties_list = await self.list(connection_type=connection_type, **kwargs) + if len(connection_properties_list) > 0: + if with_credentials: + return await self.get( + connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs + ) + else: + return connection_properties_list[0] + else: + return None + + @distributed_trace_async + async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + """Get the properties of a single connection, given its connection name, with or without + populating authentication credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :type with_credentials: bool + :return: The connection properties + :rtype: ~azure.ai.project.models._models.ConnectionProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + if not connection_name: + raise ValueError("Connection name cannot be empty") + if with_credentials: + connection: ConnectionsListSecretsResponse = await self._list_secrets( + connection_name=connection_name, ignored="ignore", **kwargs + ) + if connection.properties.auth_type == AuthenticationType.AAD: + return ConnectionProperties(connection=connection, token_credential=self._config.credential) + elif connection.properties.auth_type == AuthenticationType.SAS: + from ...models._patch import SASTokenCredential + + token_credential = SASTokenCredential( + sas_token=connection.properties.credentials.sas, + credential=self._config.credential, + subscription_id=self._config.subscription_id, + resource_group_name=self._config.resource_group_name, + project_name=self._config.project_name, + connection_name=connection_name, + ) + return ConnectionProperties(connection=connection, token_credential=token_credential) + + return ConnectionProperties(connection=connection) + else: + return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs)) + + @distributed_trace_async + async def list( + self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any + ) -> Iterable[ConnectionProperties]: + """List the properties of all connections, or all connections of a certain connection type. + + :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. + If not provided, all connections are listed.
+ :type connection_type: ~azure.ai.project.models._models.ConnectionType + :return: A list of connection properties + :rtype: Iterable[~azure.ai.project.models._models.ConnectionProperties] + :raises ~azure.core.exceptions.HttpResponseError: + """ + kwargs.setdefault("merge_span", True) + connections_list: ConnectionsListResponse = await self._list( + include_all=True, category=connection_type, **kwargs + ) + + # Iterate to create the simplified result property + connection_properties_list: List[ConnectionProperties] = [] + for connection in connections_list.value: + connection_properties_list.append(ConnectionProperties(connection=connection)) + + return connection_properties_list + + +class AgentsOperations(AgentsOperationsGenerated): + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
+ + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.project.models.AsyncToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format.
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. + :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. + :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, io.IOBase): + return await super().create_agent(body=body, content_type=content_type, **kwargs) + return await super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return await super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.AsyncToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. 
+ :rtype: ~azure.ai.project.models.AsyncToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run.
The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=False, + stream=False, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body.
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # Await the coroutine returned by the generated create_run and return the resulting run + return await response + + @distributed_trace_async + async def create_and_process_run( + self, + thread_id: str, + assistant_id: str, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: int = 1, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or
+ ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or
+ ~azure.ai.project.models.AgentsApiResponseFormatMode or
+ ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
+ Default value is 1.
+ :paramtype sleep_interval: int
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Create and initiate the run with additional parameters
+ run = await self.create_run(
+ thread_id=thread_id,
+ assistant_id=assistant_id,
+ model=model,
+ instructions=instructions,
+ additional_instructions=additional_instructions,
+ additional_messages=additional_messages,
+ tools=tools,
+ temperature=temperature,
+ top_p=top_p,
+ max_prompt_tokens=max_prompt_tokens,
+ max_completion_tokens=max_completion_tokens,
+ truncation_strategy=truncation_strategy,
+ tool_choice=tool_choice,
+ response_format=response_format,
+ metadata=metadata,
+ **kwargs,
+ )
+
+ # Monitor and process the run status; sleep without blocking the event loop
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ await asyncio.sleep(sleep_interval)
+ run = await self.get_run(thread_id=thread_id, run_id=run.id)
+
+ if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ logging.warning("No tool calls provided - cancelling run")
+ await self.cancel_run(thread_id=thread_id, run_id=run.id)
+ break
+
+ toolset = self.get_toolset()
+ if toolset:
+ tool_outputs = await toolset.execute_tool_calls(tool_calls)
+ else:
+ raise ValueError("Toolset is not available in the client.")
+
+ logging.info("Tool outputs: %s", tool_outputs)
+ if tool_outputs:
+ await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
+
+ logging.info("Current run status: %s", run.status)
+
+ return run
+
+ @overload
+ async def create_stream(
+ self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AsyncAgentRunStream:
+ """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.project.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_stream(
+ self,
+ thread_id: str,
+ *,
+ assistant_id: str,
+ content_type: str = "application/json",
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AsyncAgentRunStream:
+ """Creates a new stream for an agent thread.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. 
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler
+ :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.project.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_stream(
+ self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AsyncAgentRunStream:
+ """Creates a new run for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.project.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_stream(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AsyncAgentRunStream:
+ """Creates a new run for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str
+ :keyword model: The overridden model name that the agent should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the agent should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
+ :rtype: ~azure.ai.project.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + return await response + + @overload + async def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AsyncAgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AsyncAgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AsyncAgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AsyncAgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. 
+ :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AsyncAgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # Cast the response to Iterator[bytes] for type correctness + response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) + + return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + async def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None + ) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + toolset = self.get_toolset() + if toolset: + tool_outputs = await toolset.execute_tool_calls(tool_calls) + else: + logger.warning("Toolset is not available in the client.") + return + + logger.info(f"Tool outputs: {tool_outputs}") + if tool_outputs: + async with await self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler + ) as stream: + await stream.until_done() + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. 
+ :type file_path: str
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def upload_file(
+ self,
+ body: Optional[JSON] = None,
+ *,
+ file: Optional[FileType] = None,
+ file_path: Optional[str] = None,
+ purpose: Union[str, _models.FilePurpose, None] = None,
+ filename: Optional[str] = None,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """
+ Uploads a file for use by other operations, delegating to the generated operations.
+
+ :param body: JSON body. Required if `file` and `file_path` are not provided.
+ :param file: File content. Required if `body` and `file_path` are not provided.
+ :param file_path: Path to the file. Required if `body` and `file` are not provided.
+ :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required if `body` is not provided.
+ :param filename: The name of the file.
+ :param kwargs: Additional parameters.
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :raises FileNotFoundError: If the file_path is invalid.
+ :raises IOError: If there are issues with reading the file.
+ :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors.
+ """
+ if body is not None:
+ return await super().upload_file(body=body, **kwargs)
+
+ if isinstance(purpose, FilePurpose):
+ purpose = purpose.value
+
+ if file is not None and purpose is not None:
+ return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+
+ if file_path is not None and purpose is not None:
+ if not os.path.isfile(file_path):
+ raise FileNotFoundError(f"The file path provided does not exist: {file_path}")
+
+ try:
+ with open(file_path, "rb") as f:
+ content = f.read()
+
+ # Determine filename and create correct FileType
+ base_filename = filename or os.path.basename(file_path)
+ file_content: FileType = (base_filename, content)
+
+ return await super().upload_file(file=file_content, purpose=purpose, **kwargs)
+ except IOError as e:
+ raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") from e
+
+ raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")
+
+ @overload
+ async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def upload_file_and_poll(
+ self,
+ *,
+ file: FileType,
+ purpose: Union[str, _models.FilePurpose],
+ filename: Optional[str] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :keyword file: Required.
+ :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :param kwargs: Additional parameters. + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = await self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ )
+
+ # Poll without blocking the event loop
+ while uploaded_file.status in ["uploaded", "pending", "running"]:
+ await asyncio.sleep(sleep_interval)
+ uploaded_file = await self.get_file(uploaded_file.id)
+
+ return uploaded_file
+
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls it until it is ready.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls it until it is ready.
+
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls it until it is ready.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_and_poll(
+ self,
+ body: Union[JSON, IO[bytes], None] = None,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls it until it is ready.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is not None:
+ vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs)
+ elif file_ids is not None or (name is not None and expires_after is not None):
+ vector_store = await self.create_vector_store(
+ content_type=content_type,
+ file_ids=file_ids,
+ name=name,
+ expires_after=expires_after,
+ chunking_strategy=chunking_strategy,
+ metadata=metadata,
+ **kwargs,
+ )
+ else:
+ raise ValueError(
+ "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
+ "'file_ids', or 'name' and 'expires_after'."
+ )
+
+ # Poll without blocking the event loop
+ while vector_store.status == "in_progress":
+ await asyncio.sleep(sleep_interval)
+ vector_store = await self.get_vector_store(vector_store.id)
+
+ return vector_store
+
+ @overload
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is None:
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
+ )
+ else:
+ content_type = kwargs.get("content_type", "application/json")
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ body=body, content_type=content_type, **kwargs
+ )
+
+ # Poll without blocking the event loop
+ while vector_store_file_batch.status == "in_progress":
+ await asyncio.sleep(sleep_interval)
+ vector_store_file_batch = await super().get_vector_store_file_batch(
+ vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
+ )
+
+ return vector_store_file_batch
+
+
+__all__: List[str] = [
+ "AgentsOperations",
+ "ConnectionsOperations",
+ "InferenceOperations",
+] # Add all objects you want publicly available to users at this package level


 def patch_sdk():
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
index f7dd32510333..59fd303c56e5 100644
--- a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
+++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
@@ -6,9 +6,987 @@
 Follow our quickstart for examples:
 https://aka.ms/azsdk/python/dpcodegen/python/customize
 """
-from typing import List
+import datetime
+import inspect
+import json
+import logging
+import base64
+import asyncio
-__all__: List[str] = [] # Add all objects you want publicly available to users at this package level
+from azure.core.credentials import TokenCredential, AccessToken
+
+from ._enums import AgentStreamEvent, ConnectionType
+from ._models import (
+ ConnectionsListSecretsResponse,
+ MessageDeltaChunk,
+ SubmitToolOutputsAction,
+ ThreadRun,
+ RunStep,
+ ThreadMessage,
+ RunStepDeltaChunk,
+ FunctionToolDefinition,
+ FunctionDefinition,
+ ToolDefinition,
+ ToolResources,
+ FileSearchToolDefinition,
+ FileSearchToolResource,
+ CodeInterpreterToolDefinition,
+ CodeInterpreterToolResource,
+ RequiredFunctionToolCall,
+)
+
+from abc import ABC, abstractmethod
+from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin
+
+logger = logging.getLogger(__name__)
+
+
+def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Remove parameters that are not present in the model class's public fields; return a shallow copy of the dictionary.
+
+ **Note:** Classes inherited from the model check that the supplied parameters are present
+ in the list of attributes and raise an error if they are not. This check may not
+ be relevant for classes that do not inherit from azure.ai.project._model_base.Model.
+
+ :param model_class: The class of model to be used.
+ :param parameters: The parsed dictionary with parameters.
+ :return: The dictionary with all invalid parameters removed.
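+
+ Example (an illustrative sketch only; it assumes ``ThreadRun`` declares an ``id``
+ field but not ``brand_new_server_field``):
+
+ .. code-block:: python
+
+     parsed = {"id": "run_123", "brand_new_server_field": 42}
+     filtered = _filter_parameters(ThreadRun, parsed)
+     # "brand_new_server_field" is dropped, so ThreadRun(**filtered)
+     # cannot fail on fields the local model does not know about.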
+ """ + new_params = {} + valid_parameters = set( + filter( + lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() + ) + ) + for k in filter(lambda x: x in valid_parameters, parameters.keys()): + new_params[k] = parameters[k] + return new_params + + +def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: + """ + Instantiate class with the set of parameters from the server. + + :param model_class: The class of model to be used. + :param parameters: The parsed dictionary with parameters. + :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. + """ + if not isinstance(parameters, dict): + return parameters + return model_class(**_filter_parameters(model_class, parameters)) + + +class ConnectionProperties: + """The properties of a single connection. + + :ivar id: A unique identifier for the connection. + :vartype id: str + :ivar name: The friendly name of the connection. + :vartype name: str + :ivar authentication_type: The authentication type used by the connection. + :vartype authentication_type: ~azure.ai.project.models._models.AuthenticationType + :ivar connection_type: The connection type . + :vartype connection_type: ~azure.ai.project.models._models.ConnectionType + :ivar endpoint_url: The endpoint URL associated with this connection + :vartype endpoint_url: str + :ivar key: The api-key to be used when accessing the connection. + :vartype key: str + :ivar token_credential: The TokenCredential to be used when accessing the connection. + :vartype token_credential: ~azure.core.credentials.TokenCredential + """ + + def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: + self.id = connection.id + self.name = connection.name + self.authentication_type = connection.properties.auth_type + self.connection_type = connection.properties.category + self.endpoint_url = ( + connection.properties.target[:-1] + if connection.properties.target.endswith("/") + else connection.properties.target + ) + self.key: str = None + if hasattr(connection.properties, "credentials"): + if hasattr(connection.properties.credentials, "key"): + self.key = connection.properties.credentials.key + self.token_credential = token_credential + + def to_evaluator_model_config(self, deployment_name, api_version) -> Dict[str, str]: + connection_type = self.connection_type.value + if self.connection_type.value == ConnectionType.AZURE_OPEN_AI: + connection_type = "azure_openai" + + if self.authentication_type == "ApiKey": + model_config = { + "azure_deployment": deployment_name, + "azure_endpoint": self.endpoint_url, + "type": connection_type, + "api_version": api_version, + "api_key": f"{self.id}/credentials/key", + } + else: + model_config = { + "azure_deployment": deployment_name, + "azure_endpoint": self.endpoint_url, + "type": self.connection_type, + "api_version": api_version, + } + return model_config + + def __str__(self): + out = "{\n" + out += f' "name": "{self.name}",\n' + out += f' "id": "{self.id}",\n' + out += f' "authentication_type": "{self.authentication_type}",\n' + out += f' "connection_type": "{self.connection_type}",\n' + out += f' "endpoint_url": "{self.endpoint_url}",\n' + if self.key: + out += f' "key": "{self.key}",\n' + else: + out += f' "key": null,\n' + if self.token_credential: + access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") + out += f' "token_credential": 
"{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' + else: + out += f' "token_credential": null\n' + out += "}\n" + return out + + +class SASTokenCredential(TokenCredential): + def __init__( + self, + *, + sas_token: str, + credential: TokenCredential, + subscription_id: str, + resource_group_name: str, + project_name: str, + connection_name: str, + ): + self._sas_token = sas_token + self._credential = credential + self._subscription_id = subscription_id + self._resource_group_name = resource_group_name + self._project_name = project_name + self._connection_name = connection_name + self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) + logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) + + @classmethod + def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: + payload = jwt_token.split(".")[1] + padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary + decoded_bytes = base64.urlsafe_b64decode(padded_payload) + decoded_str = decoded_bytes.decode("utf-8") + decoded_payload = json.loads(decoded_str) + expiration_date = decoded_payload.get("exp") + return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) + + def _refresh_token(self) -> None: + logger.debug("[SASTokenCredential._refresh_token] Enter") + from azure.ai.project import AIProjectClient + + project_client = AIProjectClient( + credential=self._credential, + endpoint="not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. + subscription_id=self._subscription_id, + resource_group_name=self._resource_group_name, + project_name=self._project_name, + ) + + connection = project_client.endpoints.get(connection_name=self._connection_name, populate_secrets=True) + + self._sas_token = connection.properties.credentials.sas + self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) + logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) + + def get_token(self) -> AccessToken: + logger.debug("SASTokenCredential.get_token] Enter") + if self._expires_on < datetime.datetime.now(datetime.timezone.utc): + self._refresh_token() + return AccessToken(self._sas_token, self._expires_on.timestamp()) + + +# Define type_map to translate Python type annotations to JSON Schema types +type_map = { + "str": "string", + "int": "integer", + "float": "number", + "bool": "boolean", + "bytes": "string", # Typically encoded as base64-encoded strings in JSON + "NoneType": "null", + "datetime": "string", # Use format "date-time" + "date": "string", # Use format "date" + "UUID": "string", # Use format "uuid" +} + + +def _map_type(annotation) -> str: + + if annotation == inspect.Parameter.empty: + return "string" # Default type if annotation is missing + + origin = get_origin(annotation) + + if origin in {list, List}: + return "array" + elif origin in {dict, Dict}: + return "object" + elif hasattr(annotation, "__name__"): + return type_map.get(annotation.__name__, "string") + elif isinstance(annotation, type): + return type_map.get(annotation.__name__, "string") + + return "string" # Fallback to "string" if type is unrecognized + + +class Tool(ABC): + """ + An abstract class representing a tool that can be used by an agent. 
+ """ + + @property + @abstractmethod + def definitions(self) -> List[ToolDefinition]: + """Get the tool definitions.""" + pass + + @property + @abstractmethod + def resources(self) -> ToolResources: + """Get the tool resources.""" + pass + + @abstractmethod + def execute(self, tool_call: Any) -> Any: + """ + Execute the tool with the provided tool call. + + :param tool_call: The tool call to execute. + :return: The output of the tool operations. + """ + pass + + +class FunctionTool(Tool): + """ + A tool that executes user-defined functions. + """ + + def __init__(self, functions: Dict[str, Any]): + """ + Initialize FunctionTool with a dictionary of functions. + + :param functions: A dictionary where keys are function names and values are the function objects. + """ + self._functions = functions + self._definitions = self._build_function_definitions(functions) + + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: + specs = [] + for name, func in functions.items(): + sig = inspect.signature(func) + params = sig.parameters + docstring = inspect.getdoc(func) + description = docstring.split("\n")[0] if docstring else "No description" + + properties = {} + for param_name, param in params.items(): + param_type = _map_type(param.annotation) + param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None + properties[param_name] = {"type": param_type, "description": param_description} + + function_def = FunctionDefinition( + name=name, + description=description, + parameters={"type": "object", "properties": properties, "required": list(params.keys())}, + ) + tool_def = FunctionToolDefinition(function=function_def) + specs.append(tool_def) + return specs + + def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: + function_name = tool_call.function.name + arguments = tool_call.function.arguments + + if function_name not in self._functions: + logging.error(f"Function '{function_name}' not found.") + raise ValueError(f"Function '{function_name}' not found.") + + function = self._functions[function_name] + + try: + parsed_arguments = json.loads(arguments) + except json.JSONDecodeError as e: + logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") + raise ValueError(f"Invalid JSON arguments: {e}") from e + + if not isinstance(parsed_arguments, dict): + logging.error(f"Arguments must be a JSON object for function '{function_name}'.") + raise TypeError("Arguments must be a JSON object.") + + return function, parsed_arguments + + def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + function, parsed_arguments = self._get_func_and_args(tool_call) + + try: + return function(**parsed_arguments) if parsed_arguments else function() + except TypeError as e: + logging.error(f"Error executing function '{tool_call.function.name}': {e}") + raise + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the function definitions. + + :return: A list of function definitions. + """ + return self._definitions + + @property + def resources(self) -> ToolResources: + """ + Get the tool resources for the agent. + + :return: An empty ToolResources as FunctionTool doesn't have specific resources. 
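+
+        A minimal usage sketch (the ``fetch_weather`` helper is hypothetical, for illustration):
+
+            def fetch_weather(city: str) -> str:
+                # The first line of a real docstring becomes the tool description.
+                return "Sunny in " + city
+
+            function_tool = FunctionTool(functions={"fetch_weather": fetch_weather})
+            definitions = function_tool.definitions  # FunctionToolDefinition built from the signature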
+        """
+        return ToolResources()
+
+
+class AsyncFunctionTool(FunctionTool):
+
+    async def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
+        function, parsed_arguments = self._get_func_and_args(tool_call)
+
+        try:
+            if inspect.iscoroutinefunction(function):
+                return await function(**parsed_arguments) if parsed_arguments else await function()
+            else:
+                return function(**parsed_arguments) if parsed_arguments else function()
+        except TypeError as e:
+            logging.error(f"Error executing function '{tool_call.function.name}': {e}")
+            raise
+
+
+class FileSearchTool(Tool):
+    """
+    A tool that searches for uploaded file information from the created vector stores.
+    """
+
+    def __init__(self, vector_store_ids: Optional[List[str]] = None):
+        # Avoid a mutable default argument; each instance gets its own list.
+        self.vector_store_ids = vector_store_ids if vector_store_ids is not None else []
+
+    def add_vector_store(self, store_id: str):
+        """
+        Add a vector store ID to the list of vector stores to search for files.
+
+        :param store_id: The ID of the vector store to add.
+        """
+        self.vector_store_ids.append(store_id)
+
+    @property
+    def definitions(self) -> List[ToolDefinition]:
+        """
+        Get the file search tool definitions.
+        """
+        return [FileSearchToolDefinition()]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the file search resources.
+        """
+        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids))
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class CodeInterpreterTool(Tool):
+    """
+    A tool that interprets code files uploaded to the agent.
+    """
+
+    def __init__(self):
+        self.file_ids = []
+
+    def add_file(self, file_id: str):
+        """
+        Add a file ID to the list of files to interpret.
+
+        :param file_id: The ID of the file to interpret.
+        """
+        self.file_ids.append(file_id)
+
+    @property
+    def definitions(self) -> List[ToolDefinition]:
+        """
+        Get the code interpreter tool definitions.
+        """
+        return [CodeInterpreterToolDefinition()]
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the code interpreter resources.
+        """
+        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids))
+
+    def execute(self, tool_call: Any) -> Any:
+        pass
+
+
+class ToolSet:
+    """
+    A collection of tools that can be used by an agent.
+    """
+
+    def __init__(self):
+        self._tools: List[Tool] = []
+
+    def validate_tool_type(self, tool_type: Type[Tool]) -> None:
+        """
+        Validate the type of the tool.
+
+        :param tool_type: The type of the tool to validate.
+        :raises ValueError: If the tool type is not supported by this tool set.
+        """
+        if issubclass(tool_type, AsyncFunctionTool):
+            raise ValueError(
+                "AsyncFunctionTool is not supported in ToolSet. "
+                "To use async functions, use AsyncToolSet and agents operations in azure.ai.project.aio."
+            )
+
+    def add(self, tool: Tool):
+        """
+        Add a tool to the tool set.
+
+        :param tool: The tool to add.
+        :raises ValueError: If a tool of the same type already exists.
+        """
+        self.validate_tool_type(type(tool))
+
+        if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools):
+            raise ValueError(f"Tool of type {type(tool).__name__} already exists in the ToolSet.")
+        self._tools.append(tool)
+
+    def remove(self, tool_type: Type[Tool]) -> None:
+        """
+        Remove a tool of the specified type from the tool set.
+
+        :param tool_type: The type of tool to remove.
+        :raises ValueError: If a tool of the specified type is not found.
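+
+        A usage sketch for the tool set as a whole (assumes ``project_client`` is an
+        AIProjectClient and ``function_tool`` is a FunctionTool built earlier; the model
+        deployment name is a placeholder):
+
+            toolset = ToolSet()
+            toolset.add(function_tool)
+            toolset.add(CodeInterpreterTool())
+            agent = project_client.agents.create_agent(
+                model="my-model-deployment", name="my-agent", instructions="You are a helpful agent", toolset=toolset
+            )
+            toolset.remove(CodeInterpreterTool)  # remove by type if no longer needed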
+        """
+        for i, tool in enumerate(self._tools):
+            if isinstance(tool, tool_type):
+                del self._tools[i]
+                logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.")
+                return
+        raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.")
+
+    @property
+    def definitions(self) -> List[ToolDefinition]:
+        """
+        Get the definitions for all tools in the tool set.
+        """
+        tools = []
+        for tool in self._tools:
+            tools.extend(tool.definitions)
+        return tools
+
+    @property
+    def resources(self) -> ToolResources:
+        """
+        Get the resources for all tools in the tool set.
+        """
+        tool_resources = {}
+        for tool in self._tools:
+            resources = tool.resources
+            for key, value in resources.items():
+                if key in tool_resources:
+                    if isinstance(tool_resources[key], dict) and isinstance(value, dict):
+                        tool_resources[key].update(value)
+                else:
+                    tool_resources[key] = value
+        return self._create_tool_resources_from_dict(tool_resources)
+
+    def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources:
+        """
+        Safely convert a dictionary into a ToolResources instance.
+        """
+        try:
+            return ToolResources(**resources)
+        except TypeError as e:
+            logging.error(f"Error creating ToolResources: {e}")
+            raise ValueError("Invalid resources for ToolResources.") from e
+
+    def get_definitions_and_resources(self) -> Dict[str, Any]:
+        """
+        Get the definitions and resources for all tools in the tool set.
+
+        :return: A dictionary containing the tool resources and definitions.
+        """
+        return {
+            "tool_resources": self.resources,
+            "tools": self.definitions,
+        }
+
+    def get_tool(self, tool_type: Type[Tool]) -> Tool:
+        """
+        Get a tool of the specified type from the tool set.
+
+        :param tool_type: The type of tool to get.
+        :return: The tool of the specified type.
+        :raises ValueError: If a tool of the specified type is not found.
+        """
+        for tool in self._tools:
+            if isinstance(tool, tool_type):
+                return tool
+        raise ValueError(f"Tool of type {tool_type.__name__} not found.")
+
+    def execute_tool_calls(self, tool_calls: List[Any]) -> Any:
+        """
+        Execute the provided tool calls using the matching tools in this tool set.
+
+        :param tool_calls: A list of tool calls to execute.
+        :return: The output of the tool operations.
+        """
+        tool_outputs = []
+
+        for tool_call in tool_calls:
+            try:
+                if tool_call.type == "function":
+                    tool = self.get_tool(FunctionTool)
+                    output = tool.execute(tool_call)
+                    tool_output = {
+                        "tool_call_id": tool_call.id,
+                        "output": output,
+                    }
+                    tool_outputs.append(tool_output)
+            except Exception as e:
+                logging.error(f"Failed to execute tool call {tool_call}: {e}")
+
+        return tool_outputs
+
+
+class AsyncToolSet(ToolSet):
+
+    def validate_tool_type(self, tool_type: Type[Tool]) -> None:
+        """
+        Validate the type of the tool.
+
+        :param tool_type: The type of the tool to validate.
+        :raises ValueError: If the tool type is not supported by this tool set.
+        """
+        # AsyncFunctionTool subclasses FunctionTool, so exclude it from the check explicitly.
+        if issubclass(tool_type, FunctionTool) and not issubclass(tool_type, AsyncFunctionTool):
+            raise ValueError(
+                "FunctionTool is not supported in AsyncToolSet. "
+                "Please use AsyncFunctionTool instead and provide sync and/or async function(s)."
+            )
+
+    async def execute_tool_calls(self, tool_calls: List[Any]) -> Any:
+        """
+        Execute the provided tool calls using the matching tools in this tool set.
+
+        :param tool_calls: A list of tool calls to execute.
+        :return: The output of the tool operations.
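+
+        A minimal sketch (the ``fetch_weather_async`` coroutine is hypothetical; ``run`` is assumed
+        to be a ThreadRun in ``requires_action`` state):
+
+            async def fetch_weather_async(city: str) -> str:
+                return "Sunny in " + city
+
+            async_toolset = AsyncToolSet()
+            async_toolset.add(AsyncFunctionTool({"fetch_weather_async": fetch_weather_async}))
+            tool_outputs = await async_toolset.execute_tool_calls(
+                run.required_action.submit_tool_outputs.tool_calls
+            )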
+ """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(AsyncFunctionTool) + output = await tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: + logging.error(f"Failed to execute tool call {tool_call}: {e}") + + return tool_outputs + + +class AgentEventHandler: + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class AsyncAgentEventHandler: + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + async def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + async def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + async def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + async def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + async def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], + event_handler: Optional["AsyncAgentEventHandler"] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + result = close_method() + if asyncio.iscoroutine(result): + await result + + def __aiter__(self): + return self + + async def __anext__(self) -> Tuple[str, Any]: + while True: + try: + chunk = await self.response_iterator.__anext__() + self.buffer += chunk.decode("utf-8") + except StopAsyncIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return await self._process_event(event_data_str) + raise StopAsyncIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return await self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type 
= None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = _safe_instantiate(RunStep, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) + else: + event_data_obj = parsed_data + + return event_type, event_data_obj + + async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = self._parse_event_data(event_data_str) + + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + await self.submit_tool_outputs(event_data_obj, self.event_handler) + if self.event_handler: + try: + if isinstance(event_data_obj, MessageDeltaChunk): + await self.event_handler.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + await self.event_handler.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + await self.event_handler.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + await self.event_handler.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + await self.event_handler.on_run_step_delta(event_data_obj) + elif event_type == AgentStreamEvent.ERROR: + await self.event_handler.on_error(event_data_obj) + elif event_type == AgentStreamEvent.DONE: + await self.event_handler.on_done() + self.done = True # Mark the stream as done + else: + await self.event_handler.on_unhandled_event(event_type, event_data_obj) + except Exception as e: + logging.error(f"Error in event handler for event '{event_type}': {e}") + + return event_type, event_data_obj + + async def until_done(self) -> 
None: + """ + Iterates through all events until the stream is marked as done. + """ + try: + async for _ in self: + pass # The EventHandler handles the events + except StopAsyncIteration: + pass + + +class AgentRunStream(Iterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AgentEventHandler]], None], + event_handler: Optional[AgentEventHandler] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + close_method() + + def __iter__(self): + return self + + def __next__(self) -> Tuple[str, Any]: + if self.done: + raise StopIteration + while True: + try: + chunk = next(self.response_iterator) + self.buffer += chunk.decode("utf-8") + except StopIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return self._process_event(event_data_str) + raise StopIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type = None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = _safe_instantiate(RunStep, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) + else: + event_data_obj 
= parsed_data
+
+        return event_type, event_data_obj
+
+    def _process_event(self, event_data_str: str) -> Tuple[str, Any]:
+        event_type, event_data_obj = self._parse_event_data(event_data_str)
+
+        if (
+            isinstance(event_data_obj, ThreadRun)
+            and event_data_obj.status == "requires_action"
+            and isinstance(event_data_obj.required_action, SubmitToolOutputsAction)
+        ):
+            self.submit_tool_outputs(event_data_obj, self.event_handler)
+        if self.event_handler:
+            try:
+                if isinstance(event_data_obj, MessageDeltaChunk):
+                    self.event_handler.on_message_delta(event_data_obj)
+                elif isinstance(event_data_obj, ThreadMessage):
+                    self.event_handler.on_thread_message(event_data_obj)
+                elif isinstance(event_data_obj, ThreadRun):
+                    self.event_handler.on_thread_run(event_data_obj)
+                elif isinstance(event_data_obj, RunStep):
+                    self.event_handler.on_run_step(event_data_obj)
+                elif isinstance(event_data_obj, RunStepDeltaChunk):
+                    self.event_handler.on_run_step_delta(event_data_obj)
+                elif event_type == AgentStreamEvent.ERROR:
+                    self.event_handler.on_error(event_data_obj)
+                elif event_type == AgentStreamEvent.DONE:
+                    self.event_handler.on_done()
+                    self.done = True  # Mark the stream as done
+                else:
+                    self.event_handler.on_unhandled_event(event_type, event_data_obj)
+            except Exception as e:
+                logging.error(f"Error in event handler for event '{event_type}': {e}")
+
+        return event_type, event_data_obj
+
+    def until_done(self) -> None:
+        """
+        Iterates through all events until the stream is marked as done.
+        """
+        try:
+            for _ in self:
+                pass  # The EventHandler handles the events
+        except StopIteration:
+            pass
+
+
+__all__: List[str] = [
+    "AgentEventHandler",
+    "AgentRunStream",
+    "AsyncAgentEventHandler",
+    "AsyncAgentRunStream",
+    "AsyncFunctionTool",
+    "AsyncToolSet",
+    "CodeInterpreterTool",
+    "FileSearchTool",
+    "FunctionTool",
+    "SASTokenCredential",
+    "Tool",
+    "ToolSet",
+]  # Add all objects you want publicly available to users at this package level
 
 
 def patch_sdk():
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py
index f7dd32510333..30d23f0ccd59 100644
--- a/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py
+++ b/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
 # ------------------------------------
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
@@ -6,9 +7,1969 @@
 Follow our quickstart for examples:
 https://aka.ms/azsdk/python/dpcodegen/python/customize
 """
-from typing import List
+import sys, io, logging, os, time
+from io import IOBase
+from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast
 
-__all__: List[str] = []  # Add all objects you want publicly available to users at this package level
+# from zoneinfo import ZoneInfo
+from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
+from ._operations import AgentsOperations as AgentsOperationsGenerated
+from ..models._enums import AuthenticationType, ConnectionType
+from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse
+from .._types import AgentsApiResponseFormatOption
+from ..models._patch import ConnectionProperties
+from ..models._enums import FilePurpose
+from .._vendor import FileType
+from .. import models as _models
+
+from azure.core.tracing.decorator import distributed_trace
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from .. import _types
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+_Unset: Any = object()
+
+logger = logging.getLogger(__name__)
+
+
+class InferenceOperations:
+
+    def __init__(self, outer_instance):
+        self.outer_instance = outer_instance
+
+    @distributed_trace
+    def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
+        """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Chat Completions AI model deployment.
+        The package `azure-ai-inference` must be installed prior to calling this method.
+
+        :return: An authenticated chat completions client
+        :rtype: ~azure.ai.inference.ChatCompletionsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference import ChatCompletionsClient
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            ) from e
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
+            )
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
+            )
+            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+    @distributed_trace
+    def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
+        """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
+        The package `azure-ai-inference` must be installed prior to calling this method.
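+
+        A usage sketch, assuming the project client exposes these helpers as
+        ``project_client.inference`` (that attribute name is an assumption here):
+
+            embeddings_client = project_client.inference.get_embeddings_client()
+            response = embeddings_client.embed(input=["first phrase", "second phrase"])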
+
+        :return: An authenticated embeddings client
+        :rtype: ~azure.ai.inference.EmbeddingsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference import EmbeddingsClient
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            ) from e
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
+            )
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
+            )
+            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+    @distributed_trace
+    def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI":
+        """Get an authenticated AzureOpenAI client (from the `openai` package) for the default
+        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
+
+        :return: An authenticated AzureOpenAI client
+        :rtype: ~openai.AzureOpenAI
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No Azure OpenAI connection found")
+
+        try:
+            from openai import AzureOpenAI
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") from e
+
+        # Pick latest GA version from the "Data plane - Inference" row in the table
+        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        AZURE_OPENAI_API_VERSION = "2024-06-01"
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
+            )
+            client = AzureOpenAI(
+                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
+            )
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as e:
+                raise ModuleNotFoundError(
+                    "azure-identity package not installed. Please install it using 'pip install azure-identity'"
+                ) from e
+            client = AzureOpenAI(
+                # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as e:
+                raise ModuleNotFoundError(
+                    "azure-identity package not installed. Please install it using 'pip install azure-identity'"
+                ) from e
+            client = AzureOpenAI(
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+
+class ConnectionsOperations(ConnectionsOperationsGenerated):
+
+    @distributed_trace
+    def get_default(
+        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
+    ) -> Optional[ConnectionProperties]:
+        """Get the properties of the default connection of a certain connection type, with or without
+        populating authentication credentials.
+
+        :param connection_type: The connection type. Required.
+        :type connection_type: ~azure.ai.project.models._models.ConnectionType
+        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+        :type with_credentials: bool
+        :return: The connection properties, or None if no connection of the given type exists.
+        :rtype: ~azure.ai.project.models._models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_type:
+            raise ValueError("You must specify a connection type")
+        # Since there is no notion of default connection at the moment, list all connections in the category
+        # and return the first one
+        connection_properties_list = self.list(connection_type=connection_type, **kwargs)
+        if len(connection_properties_list) > 0:
+            if with_credentials:
+                return self.get(
+                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
+                )
+            else:
+                return connection_properties_list[0]
+        else:
+            return None
+
+    @distributed_trace
+    def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
+        """Get the properties of a single connection, given its connection name, with or without
+        populating authentication credentials.
+
+        :param connection_name: Connection Name. Required.
+        :type connection_name: str
+        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+        :type with_credentials: bool
+        :return: The connection properties
+        :rtype: ~azure.ai.project.models._models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
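+
+        A usage sketch (assumes ``project_client`` is an AIProjectClient; the connection name is a
+        placeholder):
+
+            connection = project_client.connections.get(
+                connection_name="my-connection", with_credentials=True
+            )
+            print(connection.endpoint_url)  # populated ConnectionProperties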
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_name:
+            raise ValueError("Connection name cannot be empty")
+        if with_credentials:
+            connection: ConnectionsListSecretsResponse = self._list_secrets(
+                connection_name=connection_name, ignored="ignore", **kwargs
+            )
+            if connection.properties.auth_type == AuthenticationType.AAD:
+                return ConnectionProperties(connection=connection, token_credential=self._config.credential)
+            elif connection.properties.auth_type == AuthenticationType.SAS:
+                from ..models._patch import SASTokenCredential
+
+                token_credential = SASTokenCredential(
+                    sas_token=connection.properties.credentials.sas,
+                    credential=self._config.credential,
+                    subscription_id=self._config.subscription_id,
+                    resource_group_name=self._config.resource_group_name,
+                    project_name=self._config.project_name,
+                    connection_name=connection_name,
+                )
+                return ConnectionProperties(connection=connection, token_credential=token_credential)
+
+            return ConnectionProperties(connection=connection)
+        else:
+            return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs))
+
+    @distributed_trace
+    def list(
+        self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any
+    ) -> Iterable[ConnectionProperties]:
+        """List the properties of all connections, or all connections of a certain connection type.
+
+        :param connection_type: The connection type. Optional. If provided, this method lists connections of this type.
+         If not provided, all connections are listed.
+        :type connection_type: ~azure.ai.project.models._models.ConnectionType
+        :return: A list of connection properties
+        :rtype: Iterable[~azure.ai.project.models._models.ConnectionProperties]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs)
+
+        # Iterate to create the simplified result property
+        connection_properties_list: List[ConnectionProperties] = []
+        for connection in connections_list.value:
+            connection_properties_list.append(ConnectionProperties(connection=connection))
+
+        return connection_properties_list
+
+
+class AgentsOperations(AgentsOperationsGenerated):
+    @overload
+    def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
+        """Creates a new agent.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Agent. The Agent is compatible with MutableMapping
+        :rtype: ~azure.ai.project.models.Agent
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_agent(
+        self,
+        *,
+        model: str,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Agent:
+        """Creates a new agent.
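+
+        A usage sketch (assumes ``project_client`` is an AIProjectClient; the model deployment name
+        is a placeholder):
+
+            agent = project_client.agents.create_agent(
+                model="my-model-deployment",
+                name="my-agent",
+                instructions="You are a helpful agent",
+            )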
+ + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.project.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. 
+ :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.project.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.project.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. 
+ :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. + :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, IOBase): + return super().create_agent(body=body, content_type=content_type, **kwargs) + return super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.ToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. + :rtype: ~azure.ai.project.models.ToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. 
+ :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=False, + stream=False, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. 
+ content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + return response + + @distributed_trace + def create_and_process_run( + self, + thread_id: str, + assistant_id: str, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: int = 1, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread and processes the run. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. + Default value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run + the thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. 
The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of completion tokens specified, the run will end with status
+ ``incomplete``. See ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls whether and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
+ or ~azure.ai.project.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: The time in seconds to wait between polls of the run status. Default
+ value is 1.
+ :paramtype sleep_interval: int
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ # Create and initiate the run with the provided parameters
+ run = self.create_run(
+ thread_id=thread_id,
+ assistant_id=assistant_id,
+ model=model,
+ instructions=instructions,
+ additional_instructions=additional_instructions,
+ additional_messages=additional_messages,
+ tools=tools,
+ temperature=temperature,
+ top_p=top_p,
+ max_prompt_tokens=max_prompt_tokens,
+ max_completion_tokens=max_completion_tokens,
+ truncation_strategy=truncation_strategy,
+ tool_choice=tool_choice,
+ response_format=response_format,
+ metadata=metadata,
+ **kwargs,
+ )
+
+ # Poll until the run reaches a terminal state, executing any requested tool calls on the way
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ time.sleep(sleep_interval)
+ run = self.get_run(thread_id=thread_id, run_id=run.id)
+
+ if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ logging.warning("No tool calls provided - cancelling run")
+ self.cancel_run(thread_id=thread_id, run_id=run.id)
+ break
+
+ toolset = self.get_toolset()
+ if toolset:
+ tool_outputs = toolset.execute_tool_calls(tool_calls)
+ else:
+ raise ValueError("Toolset is not available in the client.")
+
+ logging.info("Tool outputs: %s", tool_outputs)
+ if tool_outputs:
+ self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
+
+ logging.info("Current run status: %s", run.status)
+
+ return run
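+
+ # A minimal usage sketch (not part of this module), assuming an existing `project_client`
+ # plus an agent and thread created beforehand with create_agent and create_thread:
+ #
+ # run = project_client.agents.create_and_process_run(
+ # thread_id=thread.id, assistant_id=agent.id, sleep_interval=2
+ # )
+ # print(f"Run finished with status: {run.status}") # a terminal state such as "completed"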
+
+ @overload
+ def create_stream(
+ self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AgentRunStream:
+ """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+ :rtype: ~azure.ai.project.models.AgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def create_stream(
+ self,
+ thread_id: str,
+ *,
+ assistant_id: str,
+ content_type: str = "application/json",
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ event_handler: Optional[_models.AgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AgentRunStream:
+ """Creates a new stream for an agent thread.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword model: The overridden model name that the agent should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the agent should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str
+ :keyword additional_messages: Adds additional messages to the thread before creating the run.
+ Default value is None.
+ :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
+ :keyword tools: The overridden list of enabled tools that the agent should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output more random, while lower values like 0.2 will make it more focused and
+ deterministic. Default value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model considers the results of the tokens with top_p probability mass. So 0.1 means only the
+ tokens comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only the number of prompt tokens
+ specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+ specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+ info. Default value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort to use only the number of completion
+ tokens specified, across multiple turns of the run. If the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
+ :keyword tool_choice: Controls whether and which tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.project.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_stream( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentRunStream: + """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_stream( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AgentRunStream: + """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. 
+ :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.project.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.project.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode + or ~azure.ai.project.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
+ :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=True, + stream=True, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.project.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # If streaming is enabled, return the custom stream object + return response + + @overload + def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. 
+ :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :keyword tool_outputs: Required. + :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword event_handler: The event handler to use for processing events during the run. Default + value is None. + :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_stream( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_stream( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + event_handler: Optional[_models.AgentEventHandler] = None, + **kwargs: Any, + ) -> _models.AgentRunStream: + """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + + :param thread_id: Required. + :type thread_id: str + :param run_id: Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: Required. 
+ :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] + :param event_handler: The event handler to use for processing events during the run. + :param kwargs: Additional parameters. + :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. + :rtype: ~azure.ai.project.models.AgentRunStream + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + elif tool_outputs is not _Unset: + response = super().submit_tool_outputs_to_run( + thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs + ) + + elif isinstance(body, io.IOBase): + content_type = kwargs.get("content_type", "application/json") + response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) + + else: + raise ValueError("Invalid combination of arguments provided.") + + # Cast the response to Iterator[bytes] for type correctness + response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) + + return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) + + def _handle_submit_tool_outputs( + self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None + ) -> None: + if isinstance(run.required_action, _models.SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + logger.debug("No tool calls to execute.") + return + + toolset = self.get_toolset() + if toolset: + tool_outputs = toolset.execute_tool_calls(tool_calls) + else: + logger.warning("Toolset is not available in the client.") + return + + logger.info(f"Tool outputs: {tool_outputs}") + if tool_outputs: + with self.submit_tool_outputs_to_stream( + thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler + ) as stream: + stream.until_done() + + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. + :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. 
+ :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :param kwargs: Additional parameters. + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + return super().upload_file(body=body, **kwargs) + + if isinstance(purpose, FilePurpose): + purpose = purpose.value + + if file is not None and purpose is not None: + return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + + if file_path is not None and purpose is not None: + if not os.path.isfile(file_path): + raise FileNotFoundError(f"The file path provided does not exist: {file_path}") + + try: + with open(file_path, "rb") as f: + content = f.read() + + # Determine filename and create correct FileType + base_filename = filename or os.path.basename(file_path) + file_content: FileType = (base_filename, content) + + return super().upload_file(file=file_content, purpose=purpose, **kwargs) + except IOError as e: + raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") + + raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") + + @overload + def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, + *, + file: FileType, + purpose: Union[str, _models.FilePurpose], + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: Required. 
+ :paramtype file: ~azure.ai.project._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file_and_poll( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.project.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.project.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :param kwargs: Additional parameters. + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ ) + + while uploaded_file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + uploaded_file = self.get_file(uploaded_file.id) + + return uploaded_file + + @overload + def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. 
+ :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_and_poll( + self, + body: Union[JSON, IO[bytes], None] = None, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if body is not None: + vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs) + elif file_ids is not None or (name is not None and expires_after is not None): + vector_store = self.create_vector_store( + content_type=content_type, + file_ids=file_ids, + name=name, + expires_after=expires_after, + chunking_strategy=chunking_strategy, + metadata=metadata, + **kwargs, + ) + else: + raise ValueError( + "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " + "'file_ids', or 'name' and 'expires_after'." + ) + + while vector_store.status == "in_progress": + time.sleep(sleep_interval) + vector_store = self.get_vector_store(vector_store.id) + + return vector_store + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.project.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. 
+ :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.project.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is None:
+ vector_store_file_batch = super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
+ )
+ else:
+ content_type = kwargs.get("content_type", "application/json")
+ vector_store_file_batch = super().create_vector_store_file_batch(
+ body=body, content_type=content_type, **kwargs
+ )
+
+ while vector_store_file_batch.status == "in_progress":
+ time.sleep(sleep_interval)
+ vector_store_file_batch = super().get_vector_store_file_batch(
+ vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
+ )
+
+ return vector_store_file_batch
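+
+ # A minimal end-to-end sketch of the polling helpers above (not part of this module),
+ # assuming an existing `project_client` and a local file named "product_info.md";
+ # the file name and vector store name are illustrative only:
+ #
+ # file = project_client.agents.upload_file_and_poll(
+ # file_path="product_info.md", purpose="assistants"
+ # )
+ # vector_store = project_client.agents.create_vector_store_and_poll(
+ # file_ids=[file.id], name="my_vectorstore"
+ # )
+ # batch = project_client.agents.create_vector_store_file_batch_and_poll(
+ # vector_store_id=vector_store.id, file_ids=[file.id]
+ # )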
+
+
+__all__: List[str] = [
+ "AgentsOperations",
+ "ConnectionsOperations",
+ "InferenceOperations",
+] # Add all objects you want publicly available to users at this package level

 def patch_sdk():
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py
new file mode 100644
index 000000000000..bcc25e36490b
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py
@@ -0,0 +1,76 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async.py
+
+DESCRIPTION:
+ This sample demonstrates how to use basic agent operations from
+ the Azure Agents service using an asynchronous client.
+
+USAGE:
+ python sample_agents_basics_async.py
+
+ Before running the sample:
+
+ pip install azure-ai-project azure-identity
+
+ Set these environment variables with your own values:
+ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+
+from azure.ai.project.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+import os
+
+
+async def main():
+
+ # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+ # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+ # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+ project_client = AIProjectClient.from_connection_string(
+ credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+ )
+
+ async with project_client:
+ agent = await project_client.agents.create_agent(
+ model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+ )
+ print(f"Created agent, agent ID: {agent.id}")
+
+ thread = await project_client.agents.create_thread()
+ print(f"Created thread, thread ID: {thread.id}")
+
+ message = await project_client.agents.create_message(
+ thread_id=thread.id, role="user", content="Hello, tell me a joke"
+ )
+ print(f"Created message, message ID: {message.id}")
+
+ run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+ # poll the run as long as run status is queued or in progress
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ # wait a second between polls; asyncio.sleep keeps the event loop free (time.sleep would block it)
+ await asyncio.sleep(1)
+ run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+ print(f"Run status: {run.status}")
+
+ print(f"Run completed with status: {run.status}")
+
+ await project_client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+
+ messages = await project_client.agents.list_messages(thread_id=thread.id)
+ print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py
new file mode 100644
index 000000000000..75a37b873be3
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py
@@ -0,0 +1,117 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions_async.py
+
+DESCRIPTION:
+ This sample demonstrates how to use agent operations with custom functions from
+ the Azure Agents service using an asynchronous client.
+
+USAGE:
+ python sample_agents_functions_async.py
+
+ Before running the sample:
+
+ pip install azure-ai-project azure-identity
+
+ Set these environment variables with your own values:
+ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
+from azure.identity import DefaultAzureCredential
+
+import os
+
+from user_async_functions import user_async_functions
+
+
+async def main():
+ # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+ # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+ # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+ project_client = AIProjectClient.from_connection_string(
+ credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+ )
+
+ async with project_client:
+ # Initialize assistant functions
+ functions = AsyncFunctionTool(functions=user_async_functions)
+
+ # Create agent
+ agent = await project_client.agents.create_agent(
+ model="gpt-4-1106-preview",
+ name="my-assistant",
+ instructions="You are a helpful assistant",
+ tools=functions.definitions,
+ )
+ print(f"Created agent, agent ID: {agent.id}")
+
+ # Create thread for communication
+ thread = await project_client.agents.create_thread()
+ print(f"Created thread, ID: {thread.id}")
+
+ # Create and send message
+ message = await project_client.agents.create_message(
+ thread_id=thread.id, role="user", content="Hello, what's the time?"
+ )
+ print(f"Created message, ID: {message.id}")
+
+ # Create and run assistant task
+ run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ print(f"Created run, ID: {run.id}")
+
+ # Polling loop for run status
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ # asyncio.sleep keeps the event loop responsive; time.sleep would block it
+ await asyncio.sleep(4)
+ run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+ if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ print("No tool calls provided - cancelling run")
+ await project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+ break
+
+ tool_outputs = []
+ for tool_call in tool_calls:
+ if isinstance(tool_call, RequiredFunctionToolCall):
+ try:
+ output = await functions.execute(tool_call)
+ tool_outputs.append(
+ {
+ "tool_call_id": tool_call.id,
+ "output": output,
+ }
+ )
+ except Exception as e:
+ print(f"Error executing tool_call {tool_call.id}: {e}")
+
+ print(f"Tool outputs: {tool_outputs}")
+ if tool_outputs:
+ await project_client.agents.submit_tool_outputs_to_run(
+ thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+ )
+
+ print(f"Current run status: {run.status}")
+
+ print(f"Run completed with status: {run.status}")
+
+ # Delete the agent when done
+ await project_client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+
+ # Fetch and log all messages
+ messages = await project_client.agents.list_messages(thread_id=thread.id)
+ print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
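
The function samples above import `user_async_functions` from a sibling module added elsewhere in this patch. A minimal sketch of what such a module could contain (illustrative only; the function names and the exact shape of the shipped file are assumptions, grounded only in the fact that AsyncFunctionTool accepts a collection of async callables):

# user_async_functions.py - hypothetical sketch of the module the samples import
import asyncio
import json
from datetime import datetime, timezone


async def fetch_current_datetime() -> str:
    """Return the current UTC time as a JSON string for the agent to consume."""
    await asyncio.sleep(0)  # stand-in for real asynchronous work
    return json.dumps({"current_time": datetime.now(timezone.utc).isoformat()})


# AsyncFunctionTool expects a collection of async callables to expose as tools
user_async_functions = {fetch_current_datetime}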
+
+USAGE:
+    python sample_agents_stream_eventhandler_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+from typing import Any
+
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.project.models import AsyncAgentEventHandler
+from azure.identity import DefaultAzureCredential
+
+import os
+
+
+class MyEventHandler(AsyncAgentEventHandler):
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        async with await project_client.agents.create_stream(
+            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+        ) as stream:
+            await stream.until_done()
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
new file mode 100644
index 000000000000..505bc439c25e
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
@@ -0,0 +1,111 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_with_toolset_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler and toolset from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_toolset_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+from typing import Any
+
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.project.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet
+from azure.identity import DefaultAzureCredential
+
+import os
+
+from user_async_functions import user_async_functions
+
+
+class MyEventHandler(AsyncAgentEventHandler):
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    # Initialize toolset with user functions
+    functions = AsyncFunctionTool(user_async_functions)
+    toolset = AsyncToolSet()
+    toolset.add(functions)
+
+    async with project_client:
+
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id,
+            role="user",
+            content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        async with await project_client.agents.create_stream(
+            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+        ) as stream:
+            await stream.until_done()
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py
new file mode 100644
index 000000000000..f3ad6ca178c1
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with iteration in streaming from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import AgentStreamEvent
+from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.identity import DefaultAzureCredential
+
+import os
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        async with await project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
+            async for event_type, event_data in stream:
+
+                if isinstance(event_data, MessageDeltaChunk):
+                    for content_part in event_data.delta.content:
+                        if isinstance(content_part, MessageDeltaTextContent):
+                            text_value = content_part.text.value if content_part.text else "No text"
+                            print(f"Text delta received: {text_value}")
+
+                elif isinstance(event_data, ThreadMessage):
+                    print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+                elif isinstance(event_data, ThreadRun):
+                    print(f"ThreadRun status: {event_data.status}")
+
+                elif isinstance(event_data, RunStep):
+                    print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+                elif event_type == AgentStreamEvent.ERROR:
+                    print(f"An error occurred. Data: {event_data}")
+
+                elif event_type == AgentStreamEvent.DONE:
+                    print("Stream completed.")
+                    break
+
+                else:
+                    print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
new file mode 100644
index 000000000000..d6c2fbf5dfb3
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
@@ -0,0 +1,94 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_vector_store_batch_file_search_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import asyncio
+import os
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+
+        # upload a file and wait for it to be processed
+        file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
+        print(f"Uploaded file, file ID: {file.id}")
+
+        # create a vector store with no file and wait for it to be processed
+        vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+        print(f"Created vector store, vector store ID: {vector_store.id}")
+
+        # add the file to the vector store (you can also supply the file IDs at vector store creation)
+        vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll(
+            vector_store_id=vector_store.id, file_ids=[file.id]
+        )
+        print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+        # create a file search tool
+        file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+        # note that the file search tool's definitions and resources must be added to the agent,
+        # otherwise the agent will not be able to search the file
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=file_search_tool.definitions,
+            tool_resources=file_search_tool.resources,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        await project_client.agents.delete_file(file.id)
+        print("Deleted file")
+
+        await project_client.agents.delete_vector_store(vector_store.id)
+        print("Deleted vector store")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
new file mode 100644
index 000000000000..006d93ae45ee
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
@@ -0,0 +1,83 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_with_file_search_attachment_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to create messages with file search attachments from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_with_file_search_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import FilePurpose
+from azure.ai.project.models import FileSearchTool, MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+import os
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format ";;;"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        # upload a file and wait for it to be processed
+        file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
+
+        # Create agent (the file search tool is supplied per message via an attachment)
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        # Create a message with the file search attachment
+        # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days.
+        attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(
+            thread_id=thread.id, assistant_id=agent.id, sleep_interval=4
+        )
+        print(f"Created run, run ID: {run.id}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await project_client.agents.delete_file(file.id)
+        print("Deleted file")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py
new file mode 100644
index 000000000000..4931352e03c6
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py
@@ -0,0 +1,29 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import asyncio
+import os
+import sys
+
+
+# Add parent directory to sys.path to import user_functions
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.abspath(os.path.join(current_dir, ".."))
+if parent_dir not in sys.path:
+    sys.path.insert(0, parent_dir)
+from user_functions import fetch_current_datetime, fetch_weather, send_email
+
+
+async def send_email_async(recipient: str, subject: str, body: str) -> str:
+    await asyncio.sleep(1)
+    return send_email(recipient, subject, body)
+
+
+# Statically defined user functions for fast reference; send_email is wrapped as async while the rest stay sync
+user_async_functions = {
+    "fetch_current_datetime": fetch_current_datetime,
+    "fetch_weather": fetch_weather,
+    "send_email": send_email_async,
+}
diff --git a/sdk/ai/azure-ai-project/samples/agents/product_info_1.md b/sdk/ai/azure-ai-project/samples/agents/product_info_1.md
new file mode 100644
index 000000000000..041155831d53
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/product_info_1.md
@@ -0,0 +1,51 @@
+# Information about product item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6. Safety Tips
+Safety guidelines for public and private spaces
+
+### 7. Troubleshooting
+Quick fixes for common issues
+
+## Warranty Information
+Two-year limited warranty on all electronic components
+
+## Contact Information
+Customer Support at support@contoso-galaxy-innovations.com
+
+## Return Policy
+30-day return policy with no questions asked
+
+## FAQ
+- How to sync your SmartView Glasses with your devices
+- Troubleshooting connection issues
+- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py
new file mode 100644
index 000000000000..c8c51ea43947
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py
@@ -0,0 +1,63 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_basics.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os, time
+from azure.ai.project import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+    # poll the run while its status is queued, in progress, or requires action
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # wait for a second
+        time.sleep(1)
+        run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+    print(f"Run status: {run.status}")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py
new file mode 100644
index 000000000000..e4dbc4dd82ca
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py
@@ -0,0 +1,80 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_code_interpreter_attachment.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import CodeInterpreterTool
+from azure.ai.project.models import FilePurpose
+from azure.ai.project.models import MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+    # upload a file and wait for it to be processed
+    file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    code_interpreter = CodeInterpreterTool()
+
+    # notice that the CodeInterpreter tool must be enabled in the agent creation,
+    # otherwise the agent will not be able to see the file attachment
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=code_interpreter.definitions,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # create a message with the attachment
+    attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # If you see "Rate limit is exceeded.", you may need to request more quota
+        print(f"Run failed: {run.last_error}")
+
+    project_client.agents.delete_file(file.id)
+    print("Deleted file")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py
new file mode 100644
index 000000000000..3d9c25fc3e96
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py
@@ -0,0 +1,87 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with file searching from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import FileSearchTool
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+
+    openai_file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+    print(f"Uploaded file, file ID: {openai_file.id}")
+
+    openai_vectorstore = project_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore")
+    print(f"Created vector store, vector store ID: {openai_vectorstore.id}")
+
+    # Create file search tool with resources
+    file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id])
+
+    # Create agent with file search tool and process assistant run
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant and can search information from uploaded files",
+        tools=file_search.definitions,
+        tool_resources=file_search.resources,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    # Create thread for communication
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?"
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process assistant run in thread with tools
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # If you see "Rate limit is exceeded.", you may need to request more quota
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the vector store when done
+    project_client.agents.delete_vector_store(openai_vectorstore.id)
+    print("Deleted vector store")
+
+    # Delete the agent when done
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py
new file mode 100644
index 000000000000..b73898dae603
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py
@@ -0,0 +1,105 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with custom functions from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
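+
+    The sample imports user_functions from the adjacent user_functions.py file, so run it from this folder.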
+""" +import os, time +from azure.ai.project import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.project.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +# Initialize function tool with user functions +functions = FunctionTool(functions=user_functions) + +with project_client: + # Create an agent and run user's request with function calls + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + project_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py new file mode 100644 index 000000000000..f8ac75278942 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_run_with_toolset.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_run_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.project.models import FunctionTool, ToolSet, CodeInterpreterTool
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+# Initialize agent toolset with user functions and code interpreter
+functions = FunctionTool(user_functions)
+code_interpreter = CodeInterpreterTool()
+
+toolset = ToolSet()
+toolset.add(functions)
+toolset.add(code_interpreter)
+
+# Create agent with toolset and process assistant run
+with project_client:
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    # Create thread for communication
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process agent run in thread with tools
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the agent when done
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py
new file mode 100644
index 000000000000..86a0cab17b29
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py
@@ -0,0 +1,98 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" + +import os +from azure.ai.project import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.project.models import ( + AgentEventHandler, + MessageDeltaTextContent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +from typing import Any + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + + +class MyEventHandler(AgentEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with project_client: + # Create an agent and run stream with event handler + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with project_client.agents.create_stream( + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..ea4a6b680196 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -0,0 +1,132 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_functions.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.project.models import AgentEventHandler
+from azure.identity import DefaultAzureCredential
+from azure.ai.project.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
+
+from typing import Any
+
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+
+class MyEventHandler(AgentEventHandler):
+
+    def __init__(self, functions: FunctionTool) -> None:
+        super().__init__()
+        self.functions = functions
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        # use the handler's own FunctionTool instance rather than a global
+                        output = self.functions.execute(tool_call)
+                        tool_outputs.append(
+                            {
+                                "tool_call_id": tool_call.id,
+                                "output": output,
+                            }
+                        )
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                with project_client.agents.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
+                ) as stream:
+                    stream.until_done()
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with project_client:
+    functions = FunctionTool(user_functions)
+
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=functions.definitions,
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.",
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    with project_client.agents.create_stream(
+        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler(functions)
+    ) as stream:
+        stream.until_done()
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
new file mode 100644
index 000000000000..b63d137a0671
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
@@ -0,0 +1,109 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_with_toolset.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler and toolset from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.project.models import AgentEventHandler
+from azure.identity import DefaultAzureCredential
+from azure.ai.project.models import FunctionTool, ToolSet
+
+from typing import Any
+
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+
+# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream
+# method and the functions get called automatically by default.
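+# To handle the function calls yourself instead, pass the FunctionTool definitions via the "tools" parameter at agent
+# creation and submit the outputs manually, as shown in sample_agents_stream_eventhandler_with_functions.py.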
+class MyEventHandler(AgentEventHandler):
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with project_client:
+    functions = FunctionTool(user_functions)
+    toolset = ToolSet()
+    toolset.add(functions)
+
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    with project_client.agents.create_stream(
+        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+    ) as stream:
+        stream.until_done()
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py
new file mode 100644
index 000000000000..7d89bd2ab8bc
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.project.models import (
+    AgentStreamEvent,
+    MessageDeltaTextContent,
+    MessageDeltaChunk,
+    ThreadMessage,
+    ThreadRun,
+    RunStep,
+)
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format ";;;"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    # Create an agent and run stream with iteration
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID: {message.id}")
+
+    with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
+
+        for event_type, event_data in stream:
+
+            if isinstance(event_data, MessageDeltaChunk):
+                for content_part in event_data.delta.content:
+                    if isinstance(content_part, MessageDeltaTextContent):
+                        text_value = content_part.text.value if content_part.text else "No text"
+                        print(f"Text delta received: {text_value}")
+
+            elif isinstance(event_data, ThreadMessage):
+                print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+            elif isinstance(event_data, ThreadRun):
+                print(f"ThreadRun status: {event_data.status}")
+
+            elif isinstance(event_data, RunStep):
+                print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+            elif event_type == AgentStreamEvent.ERROR:
+                print(f"An error occurred. Data: {event_data}")
+
+            elif event_type == AgentStreamEvent.DONE:
+                print("Stream completed.")
+                break
+
+            else:
+                print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py
new file mode 100644
index 000000000000..9ae1d421c9ea
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py
@@ -0,0 +1,122 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration_with_toolset.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with a toolset and iteration in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
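+
+    The sample imports user_functions from the adjacent user_functions.py file, so run it from this folder.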
+""" + +import os +from azure.ai.project import AIProjectClient +from azure.ai.project.models import AgentStreamEvent +from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun +from azure.ai.project.models import FunctionTool, ToolSet +from azure.ai.project.operations import AgentsOperations +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + + +# Function to handle tool stream iteration +def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): + try: + with operations.submit_tool_outputs_to_stream( + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + ) as tool_stream: + for tool_event_type, tool_event_data in tool_stream: + if tool_event_type == AgentStreamEvent.ERROR: + print(f"An error occurred in tool stream. Data: {tool_event_data}") + elif tool_event_type == AgentStreamEvent.DONE: + print("Tool stream completed.") + break + else: + if isinstance(tool_event_data, MessageDeltaChunk): + handle_message_delta(tool_event_data) + + except Exception as e: + print(f"Failed to process tool stream: {e}") + + +# Function to handle message delta chunks +def handle_message_delta(delta: MessageDeltaChunk) -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") + print(f"Created message, message ID {message.id}") + + with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: + + for event_type, event_data in stream: + + if isinstance(event_data, MessageDeltaChunk): + handle_message_delta(event_data) + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
Data: {event_data}") + + elif event_type == AgentStreamEvent.DONE: + print("Stream completed.") + break + + else: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py new file mode 100644 index 000000000000..b5ddaf482310 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -0,0 +1,88 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_vector_store_batch_file_search_async.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_vector_store_batch_file_search_async.py + + Before running the sample: + + pip install azure.ai.project azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.project import AIProjectClient +from azure.ai.project.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # upload a file and wait for it to be processed + file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # create a vector store with no file and wait for it to be processed + vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = project_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, role="user", 
content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + project_client.agents.delete_file(file.id) + print("Deleted file") + + project_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py new file mode 100644 index 000000000000..416060fbf206 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py @@ -0,0 +1,75 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_with_file_search_attachment.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to create messages with file search attachments from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_with_file_search_attachment.py + + Before running the sample: + + pip install azure.ai.project azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.project import AIProjectClient +from azure.ai.project.models import FilePurpose +from azure.ai.project.models import MessageAttachment +from azure.ai.project.models import FileSearchTool +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # upload a file and wait for it to be processed + file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create agent with file search tool + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+ attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + project_client.agents.delete_file(file.id) + print("Deleted file") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/user_functions.py b/sdk/ai/azure-ai-project/samples/agents/user_functions.py new file mode 100644 index 000000000000..8072b1b8a944 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/agents/user_functions.py @@ -0,0 +1,65 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import json +import datetime + +# These are the user-defined functions that can be called by the agent. + + +def fetch_current_datetime() -> str: + """ + Get the current time as a JSON string. + + :return: The current time in JSON format. + :rtype: str + """ + current_time = datetime.datetime.now() + time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")}) + return time_json + + +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + + +def send_email(recipient: str, subject: str, body: str) -> str: + """ + Sends an email with the specified subject and body to the recipient. + + :param recipient (str): Email address of the recipient. + :param subject (str): Subject of the email. + :param body (str): Body content of the email. + :return: Confirmation message. + :rtype: str + """ + # In a real-world scenario, you'd use an SMTP server or an email service API. + # Here, we'll mock the email sending. + print(f"Sending email to {recipient}...") + print(f"Subject: {subject}") + print(f"Body:\n{body}") + + message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) + return message_json + + +# Statically defined user functions for fast reference +user_functions = { + "fetch_current_datetime": fetch_current_datetime, + "fetch_weather": fetch_weather, + "send_email": send_email, +} diff --git a/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py new file mode 100644 index 000000000000..33dcfdaba1dd --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py @@ -0,0 +1,139 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_connections_async.py
+
+DESCRIPTION:
+    Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections
+    and get connection properties.
+
+USAGE:
+    python sample_connections_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project aiohttp azure-identity
+
+    Set the environment variables with your own values:
+    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    2) AI_CLIENT_CONNECTION_NAME - the name of an existing connection in your AI Studio Project, used by the
+       get-connection-by-name call below.
+"""
+
+import asyncio
+import os
+from azure.ai.project.aio import AIProjectClient
+from azure.ai.project.models import ConnectionType, AuthenticationType
+from azure.identity import DefaultAzureCredential
+
+
+async def sample_connections_async():
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    )
+
+    async with project_client:
+
+        # List the properties of all connections
+        connections = await project_client.connections.list()
+        print(f"====> Listing of all connections (found {len(connections)}):")
+        for connection in connections:
+            print(connection)
+
+        # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections)
+        connections = await project_client.connections.list(
+            connection_type=ConnectionType.AZURE_OPEN_AI,
+        )
+        print(f"====> Listing of all Azure Open AI connections (found {len(connections)}):")
+        for connection in connections:
+            print(connection)
+
+        # Get the properties of the default connection of a particular "type", with credentials
+        connection = await project_client.connections.get_default(
+            connection_type=ConnectionType.AZURE_OPEN_AI,
+            with_credentials=True,  # Optional. Defaults to "False"
+        )
+        print("====> Get default Azure Open AI connection:")
+        print(connection)
+
+        # Get the properties of a connection by connection name:
+        connection = await project_client.connections.get(
+            connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"],
+            with_credentials=True,  # Optional. Defaults to "False"
+        )
+        print("====> Get connection by name:")
+        print(connection)
+
+        # Examples of how you would create an inference client
+        if connection.connection_type == ConnectionType.AZURE_OPEN_AI:
+
+            from openai import AsyncAzureOpenAI
+
+            if connection.authentication_type == AuthenticationType.API_KEY:
+                print("====> Creating AzureOpenAI client using API key authentication")
+                client = AsyncAzureOpenAI(
+                    api_key=connection.key,
+                    azure_endpoint=connection.endpoint_url,
+                    api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+                )
+            elif connection.authentication_type == AuthenticationType.AAD:
+                print("====> Creating AzureOpenAI client using Entra ID authentication")
+                from azure.identity import get_bearer_token_provider
+
+                client = AsyncAzureOpenAI(
+                    # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+                    azure_ad_token_provider=get_bearer_token_provider(
+                        connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                    ),
+                    azure_endpoint=connection.endpoint_url,
+                    api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+                )
+            else:
+                raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+            response = await client.chat.completions.create(
+                model="gpt-4-0613",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "How many feet are in a mile?",
+                    },
+                ],
+            )
+            print(response.choices[0].message.content)
+
+        elif connection.connection_type == ConnectionType.SERVERLESS:
+
+            from azure.ai.inference.aio import ChatCompletionsClient
+            from azure.ai.inference.models import UserMessage
+
+            if connection.authentication_type == AuthenticationType.API_KEY:
+                print("====> Creating ChatCompletionsClient using API key authentication")
+                from azure.core.credentials import AzureKeyCredential
+
+                client = ChatCompletionsClient(
+                    endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+                )
+            elif connection.authentication_type == AuthenticationType.AAD:
+                # MaaS models do not yet support EntraID auth
+                print("====> Creating ChatCompletionsClient using Entra ID authentication")
+                client = ChatCompletionsClient(
+                    endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+                )
+            else:
+                raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+            response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+            await client.close()
+            print(response.choices[0].message.content)
+
+
+async def main():
+    await sample_connections_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/connections/sample_connections.py b/sdk/ai/azure-ai-project/samples/connections/sample_connections.py
new file mode 100644
index 000000000000..5be7c4195349
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/connections/sample_connections.py
@@ -0,0 +1,120 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_connections.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to enumerate connections
+    and get connection properties.
+
+USAGE:
+    python sample_connections.py
+
+    Before running the sample:
+
+    pip install azure-ai-project azure-identity
+
+    Set the environment variables with your own values:
+    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    2) AI_CLIENT_CONNECTION_NAME - the name of an existing connection in your AI Studio Project, used by the
+       get-connection-by-name call below.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import ConnectionType, AuthenticationType
+from openai import AzureOpenAI
+from azure.ai.inference import ChatCompletionsClient
+from azure.ai.inference.models import UserMessage
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from azure.core.credentials import AzureKeyCredential
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    # List the properties of all connections
+    connections = project_client.connections.list()
+    print(f"====> Listing of all connections (found {len(connections)}):")
+    for connection in connections:
+        print(connection)
+
+    # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections)
+    connections = project_client.connections.list(
+        connection_type=ConnectionType.AZURE_OPEN_AI,
+    )
+    print(f"====> Listing of all Azure Open AI connections (found {len(connections)}):")
+    for connection in connections:
+        print(connection)
+
+    # Get the properties of the default connection of a particular "type", with credentials
+    connection = project_client.connections.get_default(
+        connection_type=ConnectionType.AZURE_OPEN_AI,
+        with_credentials=True,  # Optional. Defaults to "False"
+    )
+    print("====> Get default Azure Open AI connection:")
+    print(connection)
+
+    # Get the properties of a connection by connection name:
+    connection = project_client.connections.get(
+        connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], with_credentials=True  # Optional. Defaults to "False"
+    )
+    print("====> Get connection by name:")
+    print(connection)
+
+
+# Examples of how you would create an inference client
+if connection.connection_type == ConnectionType.AZURE_OPEN_AI:
+
+    if connection.authentication_type == AuthenticationType.API_KEY:
+        print("====> Creating AzureOpenAI client using API key authentication")
+        client = AzureOpenAI(
+            api_key=connection.key,
+            azure_endpoint=connection.endpoint_url,
+            api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        )
+    elif connection.authentication_type == AuthenticationType.AAD:
+        print("====> Creating AzureOpenAI client using Entra ID authentication")
+        client = AzureOpenAI(
+            # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+            azure_ad_token_provider=get_bearer_token_provider(
+                connection.token_credential, "https://cognitiveservices.azure.com/.default"
+            ),
+            azure_endpoint=connection.endpoint_url,
+            api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        )
+    else:
+        raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+    response = client.chat.completions.create(
+        model="gpt-4-0613",
+        messages=[
+            {
+                "role": "user",
+                "content": "How many feet are in a mile?",
+            },
+        ],
+    )
+    client.close()
+    print(response.choices[0].message.content)
+
+elif connection.connection_type == ConnectionType.SERVERLESS:
+
+    if connection.authentication_type == AuthenticationType.API_KEY:
+        print("====> Creating ChatCompletionsClient using API key authentication")
+        client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
+    elif connection.authentication_type == AuthenticationType.AAD:
+        # MaaS models do not yet support EntraID auth
+        print("====> Creating ChatCompletionsClient using Entra ID authentication")
+        client = ChatCompletionsClient(
+            endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+        )
+    else:
+        raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+    response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+    client.close()
+    print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl
new file mode 100644
index 000000000000..0396a22c2db6
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl
@@ -0,0 +1,3 @@
+{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."}
+{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."}
+{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."}
diff --git a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py
new file mode 100644
index 000000000000..096224c70888
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_evaluations.py
+
+DESCRIPTION:
+    This sample demonstrates how to create and get an evaluation using evaluation operations from
+    the Azure AI Project service using a synchronous client.
+
+USAGE:
+    python sample_evaluations.py
+
+    Before running the sample:
+
+    pip install azure-identity
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/ai_project_utils#egg=azure-ai-client&subdirectory=sdk/ai/azure-ai-client"
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/demo_evaluators_id#egg=azure-ai-evaluation&subdirectory=sdk/evaluation/azure-ai-evaluation"
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.project import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.project.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType
+from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, HateUnfairnessEvaluator
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# The customer needs to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+# Upload data for evaluation
+# Service side fix needed to make this work
+# data_id = project_client.upload_file("./evaluate_test_data.jsonl")
+data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3"
+
+default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
+
+# Configure the evaluation
+evaluation = Evaluation(
+    display_name="Remote Evaluation",
+    description="Evaluation of dataset",
+    data=Dataset(id=data_id),
+    evaluators={
+        "f1_score": EvaluatorConfiguration(
+            id=F1ScoreEvaluator.evaluator_id,
+        ),
+        "relevance": EvaluatorConfiguration(
+            id=RelevanceEvaluator.evaluator_id,
+            init_params={
+                "model_config": default_connection.to_evaluator_model_config(
+                    deployment_name="GPT-4-Prod", api_version="2024-08-01-preview"
+                )
+            },
+        ),
+        "hate_unfairness": EvaluatorConfiguration(
+            id=HateUnfairnessEvaluator.evaluator_id,
+            init_params={"azure_ai_project": project_client.scope},
+        ),
+    },
+    # This is needed as a workaround until the environment gets published to the registry
+    properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"},
+)
+
+# Create the evaluation
+evaluation_response = project_client.evaluations.create(
+    evaluation=evaluation,
+)
+
+# Get the evaluation
+get_evaluation_response = project_client.evaluations.get(evaluation_response.id)
+
+print("----------------------------------------------------------------")
+print("Created evaluation, evaluation ID: ", get_evaluation_response.id)
+print("Evaluation status: ", get_evaluation_response.status)
+print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"])
+print("----------------------------------------------------------------")
diff --git a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py
new file mode 100644
index 000000000000..f436b149ab6d
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py
@@ -0,0 +1,69 @@
+from azure.ai.project import AIProjectClient
+from azure.identity import DefaultAzureCredential
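+
+# This sample shows how to create, get, list, and delete an online evaluation schedule.
+# It builds an EvaluationSchedule from Application Insights data, two evaluator
+# configurations, a cron trigger, and a sampling strategy, then exercises the
+# schedule operations on the client.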
+from azure.ai.project.models import (
+    AppInsightsConfiguration,
+    EvaluatorConfiguration,
+    SamplingStrategy,
+    EvaluationSchedule,
+    CronTrigger,
+)
+
+
+def main():
+    app_insights_config = AppInsightsConfiguration(
+        resource_id="sample_id", query="your_connection_string", service_name="sample_service_name"
+    )
+
+    f1_evaluator_config = EvaluatorConfiguration(
+        id="azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1"
+    )
+
+    custom_relevance_evaluator_config = EvaluatorConfiguration(
+        id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2",
+        init_params={"param3": "value3", "param4": "value4"},
+        data_mapping={"data3": "value3", "data4": "value4"},
+    )
+
+    cron_expression = "0 0 0 1/1 * ? *"
+    cron_trigger = CronTrigger(expression=cron_expression)
+    evaluators = {"f1_score": f1_evaluator_config, "relevance": custom_relevance_evaluator_config}
+
+    sampling_strategy = SamplingStrategy(rate=0.2)
+    display_name = "Sample Online Evaluation Schedule"
+    description = "Sample Online Evaluation Schedule Description"
+    tags = {"tag1": "value1", "tag2": "value2"}
+    properties = {"property1": "value1", "property2": "value2"}
+
+    evaluation_schedule = EvaluationSchedule(
+        data=app_insights_config,
+        evaluators=evaluators,
+        trigger=cron_trigger,
+        sampling_strategy=sampling_strategy,
+        display_name=display_name,
+        description=description,
+        tags=tags,
+        properties=properties,
+    )
+
+    # Project configuration
+    subscription = ""
+    resource_group = ""
+    workspace = ""
+    endpoint = ""
+    client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=f"{endpoint};{subscription};{resource_group};{workspace}",
+        logging_enable=True,
+    )
+    evaluation_schedule = client.evaluations.create_or_replace_schedule(
+        id="sample_schedule_id", resource=evaluation_schedule
+    )
+    client.evaluations.get_schedule(evaluation_schedule.id)
+    client.evaluations.list_schedule()
+    client.evaluations.list()
+    client.evaluations.delete_schedule(evaluation_schedule.id)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py
new file mode 100644
index 000000000000..bf2f4324e6bd
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py
@@ -0,0 +1,57 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_azure_openai_client_async.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    AsyncAzureOpenAI client from the openai package.
+
+USAGE:
+    python sample_get_azure_openai_client_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project aiohttp openai
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import os +import asyncio +from azure.ai.project.aio import AIProjectClient +from azure.identity import DefaultAzureCredential + + +async def sample_get_azure_openai_client_async(): + + async with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as project_client: + + # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection: + async with await project_client.inference.get_azure_openai_client() as client: + + response = await client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) + + +async def main(): + await sample_get_azure_openai_client_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py new file mode 100644 index 000000000000..95a6735b7493 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py @@ -0,0 +1,49 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_get_chat_completions_client_async.py + +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + async ChatCompletionsClient from the azure.ai.inference package. + +USAGE: + python sample_get_chat_completions_client_async.py + + Before running the sample: + + pip install azure.ai.project aiohttp azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" +import os +import asyncio +from azure.ai.project.aio import AIProjectClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + + +async def sample_get_chat_completions_client_async(): + + async with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as project_client: + + # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: + async with await project_client.inference.get_chat_completions_client() as client: + + response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + print(response.choices[0].message.content) + + +async def main(): + await sample_get_chat_completions_client_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py new file mode 100644 index 000000000000..d18836f79ce3 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py @@ -0,0 +1,54 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_get_embeddings_client_async.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    async EmbeddingsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_embeddings_client_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-project aiohttp azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+from azure.ai.project.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+
+async def sample_get_embeddings_client_async():
+
+    async with AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    ) as project_client:
+
+        # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection:
+        async with await project_client.inference.get_embeddings_client() as client:
+
+            response = await client.embed(input=["first phrase", "second phrase", "third phrase"])
+
+            for item in response.data:
+                length = len(item.embedding)
+                print(
+                    f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
+                    f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
+                )
+
+
+async def main():
+    await sample_get_embeddings_client_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py
new file mode 100644
index 000000000000..5f68cc4865e9
--- /dev/null
+++ b/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py
@@ -0,0 +1,45 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_azure_openai_client.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    AzureOpenAI client from the openai package.
+
+USAGE:
+    python sample_get_azure_openai_client.py
+
+    Before running the sample:
+
+    pip install azure-ai-project openai
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import os +from azure.ai.project import AIProjectClient +from azure.identity import DefaultAzureCredential + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Get an authenticated OpenAI client for your default Azure OpenAI connection: + with project_client.inference.get_azure_openai_client() as client: + + response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py new file mode 100644 index 000000000000..03e1da53a60a --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py @@ -0,0 +1,38 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_get_chat_completions_client.py + +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + async ChatCompletionsClient from the azure.ai.inference package. + +USAGE: + python sample_get_chat_completions_client.py + + Before running the sample: + + pip install azure.ai.project azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" +import os +from azure.ai.project import AIProjectClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: + with project_client.inference.get_chat_completions_client() as client: + + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py new file mode 100644 index 000000000000..96e7d97618f2 --- /dev/null +++ b/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py @@ -0,0 +1,42 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_get_embeddings_client.py + +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + async EmbeddingsClient from the azure.ai.inference package. + +USAGE: + python sample_get_embeddings_client.py + + Before running the sample: + + pip install azure.ai.project azure-identity + + Set this environment variable with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+""" +import os +from azure.ai.project import AIProjectClient +from azure.identity import DefaultAzureCredential + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: + with project_client.inference.get_embeddings_client() as client: + + response = client.embed(input=["first phrase", "second phrase", "third phrase"]) + + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) diff --git a/sdk/ai/azure-ai-project/setup.py b/sdk/ai/azure-ai-project/setup.py index 6985effa6f6a..3fe61e5b4587 100644 --- a/sdk/ai/azure-ai-project/setup.py +++ b/sdk/ai/azure-ai-project/setup.py @@ -13,7 +13,7 @@ PACKAGE_NAME = "azure-ai-project" -PACKAGE_PPRINT_NAME = "Azure Ai Project" +PACKAGE_PPRINT_NAME = "Azure AI Project" # a-b-c => a/b/c package_folder_path = PACKAGE_NAME.replace("-", "/") diff --git a/sdk/ai/azure-ai-project/tests/README.md b/sdk/ai/azure-ai-project/tests/README.md new file mode 100644 index 000000000000..76d76aee9c52 --- /dev/null +++ b/sdk/ai/azure-ai-project/tests/README.md @@ -0,0 +1,79 @@ +# copied from azure-ai-inference TODO update + +# Azure AI client library tests for Python + +The instructions below are for running tests locally, on a Windows machine, against the live service. + +## Prerequisites + +The live tests were written against the AI models mentioned below. You will need to deploy a gpt-4o model in the Azure OpenAI Studio, and have the endpoint and key for it: + +- `gpt-4o` on Azure OpenAI (AOAI), for Agents tests + +## Setup + +- Clone or download this sample repository. +- Open a command prompt window in the folder `sdk\ai\azure-ai-client`. +- If you want to run tests against the latest published client library, install it by running: + ```bash + pip install azure-ai-client + ``` +- If you want to run tests against a locally built client library: + - First build the wheel: + ```bash + pip install wheel + pip install -r dev_requirements.txt + python setup.py bdist_wheel + ``` + - Then install the resulting local wheel (update version `1.0.0b2` to the current one): + ```bash + pip install dist\azure_ai_client-1.0.0b1-py3-none-any.whl --user --force-reinstall + ``` + +## Set environment variables + +Here is the list of environment variables used by the tests: + +```bash +# For agents, including tools +set AZURE_AI_CLIENT_AGENTS_CONNECTION_STRING= +``` + + + +## Configure test proxy + +Configure the test proxy to run live service tests without recordings: + +```bash +set AZURE_TEST_RUN_LIVE=true +set AZURE_SKIP_LIVE_RECORDING=true +set PROXY_URL=http://localhost:5000 +set AZURE_TEST_USE_CLI_AUTH=true +``` + +## Run tests + +To run all tests, type: + +```bash +pytest +``` + +For windows run: + +```bash +python -m pytest tests\agents +``` + +## Additional information + +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. 
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py
new file mode 100644
index 000000000000..d271d9588b99
--- /dev/null
+++ b/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py
@@ -0,0 +1,1119 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import os
+import json
+import time
+import functools
+import datetime
+import logging
+import sys
+
+from azure.ai.project import AIProjectClient
+from azure.ai.project.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
+from azure.core.pipeline.transport import RequestsTransport
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
+from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
+from azure.identity import DefaultAzureCredential
+
+# TODO clean this up / get rid of anything not in use
+
+"""
+issues I've noticed with the code:
+    delete_thread(thread.id) fails
+    cancel_thread(thread.id) expires/times out occasionally
+    added time.sleep() to the beginning of my last few tests to avoid limits
+    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
+"""
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    "azure_ai_client",
+    project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    'azure_ai_client',
+    azure_ai_client_host_name="https://foo.bar.some-domain.ms",
+    azure_ai_client_subscription_id="00000000-0000-0000-0000-000000000000",
+    azure_ai_client_resource_group_name="rg-resour-cegr-oupfoo1",
+    azure_ai_client_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+
+
+# create tool for agent use
+def fetch_current_datetime_live():
+    """
+    Get the current time as a JSON string.
+
+    :return: The current time in JSON format.
+    :rtype: str
+    """
+    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time_json = json.dumps({"current_time": current_datetime})
+    return time_json
+
+
+# create tool for agent use
+def fetch_current_datetime_recordings():
+    """
+    Get the current time as a JSON string.
+
+    :return: Static time string so that test recordings work.
+ :rtype: str + """ + time_json = json.dumps({"current_time": "2024-10-10 12:30:19"}) + return time_json + + +# Statically defined user functions for fast reference +user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings} +user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live} + + +# The test class name needs to start with "Test" to get collected by pytest +class TestagentClient(AzureRecordedTestCase): + + # helper function: create client and using environment variables + def create_client(self, **kwargs): + # fetch environment variables + connection_string = kwargs.pop("project_connection_string_agents_tests") + credential = self.get_credential(AIProjectClient, is_async=False) + + # create and return client + client = AIProjectClient.from_connection_string( + credential=credential, + connection=connection_string, + ) + + return client + + # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list + """ + # NOTE: this test should not be run against a shared resource, as it will delete all agents + @agentClientPreparer() + @recorded_by_proxy + def test_clear_client(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # clear agent list + agents = client.agents.list_agents().data + for agent in agents: + client.agents.delete_agent(agent.id) + assert client.agents.list_agents().data.__len__() == 0 + + # close client + client.close() + """ + + # # ********************************************************************************** + # # + # # UNIT TESTS + # # + # # ********************************************************************************** + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - agent APIs + # # + # # ********************************************************************************** + + # test client creation + @agentClientPreparer() + @recorded_by_proxy + def test_create_client(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # close client + client.close() + + # test agent creation and deletion + @agentClientPreparer() + @recorded_by_proxy + def test_create_delete_agent(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + print("Created client") + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test agent creation with tools + @agentClientPreparer() + @recorded_by_proxy + def test_create_agent_with_tools(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # initialize agent functions + functions = FunctionTool(functions=user_functions_recording) + + # create agent with tools + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions + ) + assert agent.id + print("Created agent, agent ID", agent.id) + assert agent.tools + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", 
functions.definitions[0]["function"]["name"]) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + @agentClientPreparer() + @recorded_by_proxy + def test_update_agent(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + + # update agent and confirm changes went through + agent.update(name="my-agent2", instructions="You are helpful agent") + assert agent.name + assert agent.name == "my-agent2" + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + DISABLED: can't perform consistently on shared resource + @agentClientPreparer() + @recorded_by_proxy + def test_agent_list(self, **kwargs): + # create client and ensure there are no previous agents + client = self.create_client(**kwargs) + list_length = client.agents.list_agents().data.__len__() + + # create agent and check that it appears in the list + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent.id + + # create second agent and check that it appears in the list + agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 2 + assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id + + # delete agents and check list + client.agents.delete_agent(agent.id) + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent2.id + + client.agents.delete_agent(agent2.id) + assert client.agents.list_agents().data.__len__() == list_length + print("Deleted agents") + + # close client + client.close() + """ + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + # test creating thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = client.agents.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + TODO what can I update a thread with? + # test updating thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + client.agents.update_thread(thread.id, ) # TODO what can we update it with? + assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + """ + # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working + # status_code = 404, response = + # error_map = {304: , 401: , 409: } + + # test deleting thread + @agentClientPreparer() + @recorded_by_proxy + def test_delete_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = client.agents.delete_thread(thread.id) + # assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test creating multiple messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_multiple_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_list_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = client.agents.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = client.agents.create_message(thread_id=thread.id, 
role="user", content="Hello, tell me a joke") + assert message1.id + print("Created message, message ID", message1.id) + messages1 = client.agents.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + messages2 = client.agents.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + messages3 = client.agents.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = client.agents.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + TODO format the updated body + # test updating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # update message + body_json = json.dumps # TODO format body into json -- figure out what the message looks like so I can update it (might be in that picture) + client.agents.update_message(thread_id=thread.id, message_id=message.id, body=) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + 
# # **********************************************************************************
+
+ # test creating run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_create_run(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+
+ # test getting run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_get_run(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # get run
+ run2 = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+ assert run2.id
+ assert run.id == run2.id
+ print("Got run, run ID", run2.id)
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+
+ # TODO: fix; this test passes intermittently (see the bounded-wait sketch below)
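The run-status tests below poll in an inline loop with no upper bound, which is one likely cause of the intermittent failures noted in the TODO above. A bounded-wait sketch of that pattern (illustrative only: wait_for_run is a hypothetical helper, not part of the client API):

    import time

    def wait_for_run(client, thread_id, run_id, timeout_sec=60, poll_sec=1):
        # Poll until the run leaves its transient states or the deadline passes.
        deadline = time.time() + timeout_sec
        run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
        while run.status in ["queued", "in_progress", "requires_action"] and time.time() < deadline:
            time.sleep(poll_sec)
            run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
        return run

A test could then assert directly on the returned run's status instead of repeating the loop.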
+
+ # test successful run status TODO test for cancelled/unsuccessful runs
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_run_status(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create message
+ message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+ assert message.id
+ print("Created message, message ID", message.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # check status
+ assert run.status in [
+ "queued",
+ "in_progress",
+ "requires_action",
+ "cancelling",
+ "cancelled",
+ "failed",
+ "completed",
+ "expired",
+ ]
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ # wait for a second
+ time.sleep(1)
+ run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+ print("Run status:", run.status)
+
+ assert run.status in ["cancelled", "failed", "completed", "expired"]
+ print("Run completed with status:", run.status)
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+
+ """
+ # TODO another, but check that the number of runs decreases after cancelling runs
+ # TODO can each thread only support one run?
+ # test listing runs
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_list_runs(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # check list for current runs
+ runs0 = client.agents.list_runs(thread_id=thread.id)
+ assert runs0.data.__len__() == 0
+
+ # create run and check list
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+ runs1 = client.agents.list_runs(thread_id=thread.id)
+ assert runs1.data.__len__() == 1
+ assert runs1.data[0].id == run.id
+
+ # create second run
+ run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run2.id
+ print("Created run, run ID", run2.id)
+ runs2 = client.agents.list_runs(thread_id=thread.id)
+ assert runs2.data.__len__() == 2
+ assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+ """
+
+ """
+ # TODO figure out what to update the run with
+ # test updating run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_update_run(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread =
client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # update run
+ body = json.dumps({'todo': 'placeholder'})
+ client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body)
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+ """
+
+ # test submitting tool outputs to run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_submit_tool_outputs_to_run(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # Initialize agent tools
+ functions = FunctionTool(user_functions_recording)
+ code_interpreter = CodeInterpreterTool()
+
+ toolset = ToolSet()
+ toolset.add(functions)
+ toolset.add(code_interpreter)
+
+ # create agent
+ agent = client.agents.create_agent(
+ model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset
+ )
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create message
+ message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
+ assert message.id
+ print("Created message, message ID", message.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # check that tools are uploaded
+ assert run.tools
+ assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+ print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
+
+ # check status
+ assert run.status in [
+ "queued",
+ "in_progress",
+ "requires_action",
+ "cancelling",
+ "cancelled",
+ "failed",
+ "completed",
+ "expired",
+ ]
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ time.sleep(1)
+ run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+ # check if tools are needed
+ if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+ print("Requires action: submit tool outputs")
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ print(
+ "No tool calls provided - cancelling run"
+ ) # TODO: how can we ensure the run actually requires tool calls? Should this fail with an error message instead?
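+ # no tool calls to execute, so cancel the run and stop polling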
+ client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+ break
+
+ # submit tool outputs to run
+ tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here
+ print("Tool outputs:", tool_outputs)
+ if tool_outputs:
+ client.agents.submit_tool_outputs_to_run(
+ thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+ )
+
+ print("Current run status:", run.status)
+
+ print("Run completed with status:", run.status)
+
+ # check that messages used the tool
+ messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id)
+ tool_message = messages["data"][0]["content"][0]["text"]["value"]
+ hour12 = time.strftime("%I")
+ hour24 = time.strftime("%H")
+ minute = time.strftime("%M")
+ assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute in tool_message
+ print("Used tool_outputs")
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+
+ """
+ # DISABLED: rewrite to ensure run is not complete when cancel_run is called
+ # test cancelling run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_cancel_run(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread
+ thread = client.agents.create_thread()
+ assert thread.id
+ print("Created thread, thread ID", thread.id)
+
+ # create message
+ message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
+ assert message.id
+ print("Created message, message ID", message.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ # check status and cancel
+ assert run.status in ["queued", "in_progress", "requires_action"]
+ client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+
+ while run.status in ["queued", "cancelling"]:
+ time.sleep(1)
+ run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+ print("Current run status:", run.status)
+ assert run.status == "cancelled"
+ print("Run cancelled")
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+ """
+
+ # test create thread and run
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_create_thread_and_run(self, **kwargs):
+ time.sleep(26)
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # create agent
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+
+ # create thread and run
+ run = client.agents.create_thread_and_run(assistant_id=agent.id)
+ assert run.id
+ assert run.thread_id
+ print("Created run, run ID", run.id)
+
+ # get thread
+ thread = client.agents.get_thread(run.thread_id)
+ assert thread.id
+ print("Got thread, thread ID", thread.id)
+
+ # check status
+ assert run.status in [
+ "queued",
+ "in_progress",
+ "requires_action",
+ "cancelling",
+ "cancelled",
+ "failed",
+ "completed",
+ "expired",
+ ]
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ # wait for a second
+ time.sleep(1)
+ run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+ # assert run.status in
["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing run steps + @agentClientPreparer() + @recorded_by_proxy + def test_list_run_step(self, **kwargs): + + time.sleep(50) + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? + # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps["data"].__len__() > 0 # TODO what else should we look at? + + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting run step + # TODO where are step ids from + @agentClientPreparer() + @recorded_by_proxy + def test_get_run_step(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+ )
+ assert message.id
+ print("Created message, message ID", message.id)
+
+ # create run
+ run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+ assert run.id
+ print("Created run, run ID", run.id)
+
+ if run.status == "failed":
+ assert run.last_error
+ print(run.last_error)
+ print("FAILED HERE")
+
+ # check status
+ assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+ while run.status in ["queued", "in_progress", "requires_action"]:
+ # wait for a second
+ time.sleep(1)
+ run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+ if run.status == "failed":
+ assert run.last_error
+ print(run.last_error)
+ print("FAILED HERE")
+ assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+ print("Run status:", run.status)
+
+ # list steps, check that get_run_step works with first step_id
+ steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
+ assert steps["data"].__len__() > 0
+ step = steps["data"][0]
+ get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id)
+ assert step == get_step
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
+
+ # # **********************************************************************************
+ # #
+ # # HAPPY PATH SERVICE TESTS - Streaming APIs
+ # #
+ # # **********************************************************************************
+
+ # # **********************************************************************************
+ # #
+ # # NEGATIVE TESTS - TODO: decide what negative coverage belongs here
+ # #
+ # # **********************************************************************************
+
+ """
+ # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors
+ # test agent creation and deletion
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_negative_create_delete_agent(self, **kwargs):
+ # create client using bad endpoint
+ bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
+
+ credential = self.get_credential(AIProjectClient, is_async=False)
+ client = AIProjectClient.from_connection_string(
+ credential=credential,
+ connection=bad_connection_string,
+ )
+
+ # attempt to create agent with bad client
+ exception_caught = False
+ try:
+ agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+ # check for error (will not have a status code since it failed on request -- no response was received)
+ except (ServiceRequestError, HttpResponseError) as e:
+ exception_caught = True
+ if isinstance(e, ServiceRequestError):
+ assert e.message
+ assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
+ else:
+ assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e)
+
+ # close client and confirm an exception was caught
+ client.close()
+ assert exception_caught
+ """
diff --git a/sdk/ai/azure-ai-project/tests/conftest.py b/sdk/ai/azure-ai-project/tests/conftest.py
new file mode 100644
index 000000000000..d944cdf86007
--- /dev/null
+++ b/sdk/ai/azure-ai-project/tests/conftest.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import pytest
+from devtools_testutils import test_proxy, remove_batch_sanitizers
+
+
+# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method
+@pytest.fixture(scope="session", autouse=True)
+def start_proxy(test_proxy):
+ return
+
+
+@pytest.fixture(scope="session", autouse=True)
+def add_sanitizers(test_proxy):
+ # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive:
+ # - AZSDK3493: $..name
+ remove_batch_sanitizers(["AZSDK3493"])
diff --git a/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py
new file mode 100644
index 000000000000..417a305ccd89
--- /dev/null
+++ b/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py
@@ -0,0 +1,198 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import copy
+import sys
+import logging
+import datetime
+import pytest
+from azure.ai.project.models import SASTokenCredential
+from azure.core.credentials import TokenCredential, AccessToken
+from azure.core.exceptions import HttpResponseError
+from azure.ai.project.models._models import ThreadRun, RunStep, ThreadMessage
+from azure.ai.project.models._patch import _safe_instantiate, _filter_parameters
+
+
+# import azure.ai.project as sdk
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+ # Create a logger for the 'azure' SDK
+ # See https://docs.python.org/3/library/logging.html
+ logger = logging.getLogger("azure")
+ logger.setLevel(logging.DEBUG) # INFO or DEBUG
+
+ # Configure a console output
+ handler = logging.StreamHandler(stream=sys.stdout)
+ logger.addHandler(handler)
+
+
+class FakeTokenCredential(TokenCredential):
+ def get_token(self, *scopes, **kwargs):
+ # Create a fake token with an expiration time
+ token = "fake_token"
+ expires_on = datetime.datetime.now() + datetime.timedelta(hours=1)
+ return AccessToken(token, expires_on.timestamp())
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestUnit:
+
+ # **********************************************************************************
+ #
+ # UNIT TESTS
+ #
+ # **********************************************************************************
+
+ def test_sas_token_credential_class_mocked(self, **kwargs):
+ import jwt
+ import datetime
+ import time
+
+ # Create a simple JWT with 5 seconds expiration time
+ token_duration_sec = 5
+ secret_key = "my_secret_key"
+
+ sas_token_expiration: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
+ seconds=token_duration_sec
+ )
+ sas_token_expiration = sas_token_expiration.replace(microsecond=0)
+ payload = {"exp": sas_token_expiration}
+ sas_token = jwt.encode(payload, secret_key)
+
+ # You can parse the token string on https://jwt.ms/. The "exp" value there is the
+ # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC).
+ # See https://www.epochconverter.com/ to convert Unix time to readable date & time.
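+ # (For illustration: with PyJWT, jwt.decode(sas_token, secret_key, algorithms=["HS256"])
+ # returns the payload {"exp": <the expiration timestamp set above>} while the token is still valid.)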
+ # The base64 decoded string will look something like this: + # { + # "alg": "HS256", + # "typ": "JWT" + # }.{ + # "exp": 1727208894 + # }.[Signature] + print(f"Generated JWT token: {sas_token}") + + sas_token_credential = SASTokenCredential( + sas_token=sas_token, + credential=FakeTokenCredential(), + subscription_id="fake_subscription_id", + resource_group_name="fake_resouce_group", + project_name="fake_project_name", + connection_name="fake_connection_name", + ) + assert sas_token_credential._expires_on == sas_token_expiration + + exception_caught = False + try: + for _ in range(token_duration_sec + 2): + print("Looping...") + time.sleep(1) + access_token = sas_token_credential.get_token() + except HttpResponseError as e: + exception_caught = True + print(e) + assert exception_caught + + # Unit tests for the SASTokenCredential class + def test_sas_token_credential_class_real(self, **kwargs): + + # Example of real SAS token for AOAI service. You can parse it on https://jwt.ms/. The "exp" value there is the + # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC) + token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ" + expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. 
See https://www.epochconverter.com/ to convert to date & time
+ expiration_datatime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc)
+ print(f"\n[TEST] Expected expiration date: {expiration_datatime_utc}")
+
+ sas_token_credential = SASTokenCredential(
+ sas_token=token,
+ credential=None,
+ subscription_id=None,
+ resource_group_name=None,
+ project_name=None,
+ connection_name=None,
+ )
+
+ print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}")
+ assert sas_token_credential._expires_on == expiration_datatime_utc
+
+ @pytest.mark.parametrize(
+ "valid_params,model_cls",
+ [
+ (
+ {
+ "id": "12345",
+ "object": "thread.run",
+ "thread_id": "6789",
+ "assistant_id": "101112",
+ "status": "in_progress",
+ "required_action": "test",
+ "last_error": "none",
+ "model": "gpt-4",
+ "instructions": "Test instruction",
+ "tools": "Test function",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "expires_at": datetime.datetime(2024, 11, 17),
+ "started_at": datetime.datetime(2024, 11, 15),
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "cancelled_at": datetime.datetime(2024, 11, 16),
+ "failed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_details": "max_completion_tokens",
+ "usage": "in_progress",
+ "temperature": 1.0,
+ "top_p": 1.0,
+ "max_completion_tokens": 1000,
+ "truncation_strategy": "test",
+ "tool_choice": "tool name",
+ "response_format": "json",
+ "metadata": {"foo": "bar"},
+ "tool_resources": "test",
+ "parallel_tool_calls": True,
+ },
+ ThreadRun,
+ ),
+ (
+ {
+ "id": "1233",
+ "object": "thread.message",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "thread_id": "5678",
+ "status": "incomplete",
+ "incomplete_details": "test",
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_at": datetime.datetime(2024, 11, 16),
+ "role": "assistant",
+ "content": "Test",
+ "assistant_id": "9911",
+ "run_id": "11",
+ "attachments": ["4", "8", "15", "16", "23", "42"],
+ "metadata": {"foo", "bar"},
+ },
+ ThreadMessage,
+ ),
+ ],
+ )
+ def test_correct_thread_params(self, valid_params, model_cls):
+ """Test that if the service returns an extra parameter in an SSE response, it does not create issues."""
+
+ bad_params = {"foo": "bar"}
+ params = copy.deepcopy(valid_params)
+ params.update(bad_params)
+ # We should not be able to create a Thread Run with bad parameters.
+ with pytest.raises(TypeError):
+ model_cls(**params)
+ filtered_params = _filter_parameters(model_cls, params)
+ for k in valid_params:
+ assert k in filtered_params
+ for k in bad_params:
+ assert k not in filtered_params
+ # Implicitly check that we can create object with the filtered parameters.
+ model_cls(**filtered_params)
+ # Check safe initialization.
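+ # (given a dict, _safe_instantiate builds the model from the filtered parameters;
+ # the non-dict case is covered by the next test)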
+ assert isinstance(_safe_instantiate(model_cls, params), model_cls)
+
+ def test_safe_instantiate_non_dict(self):
+ """Test the safe_instantiate method when the user supplies a non-dictionary."""
+ assert _safe_instantiate(RunStep, 42) == 42
From a8fe98616859e51c31b3cebec0dd00e975378d60 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Tue, 22 Oct 2024 13:39:31 -0700
Subject: [PATCH 045/138] Fix wrong import

---
 sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
index 59fd303c56e5..2210357144e0 100644
--- a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
+++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py
@@ -33,7 +33,6 @@
 CodeInterpreterToolDefinition,
 CodeInterpreterToolResource,
 RequiredFunctionToolCall,
- ConnectionType,
 )
 from abc import ABC, abstractmethod

From 78e68947587af10f338d04467db528e32bdeddee Mon Sep 17 00:00:00 2001
From: M-Hietala <78813398+M-Hietala@users.noreply.github.com>
Date: Tue, 22 Oct 2024 16:02:22 -0500
Subject: [PATCH 046/138] M hietala/agent tools updates (#38031)

* no need for function name separately in function definitions

* more changes related to not needing to specify function names separately

* adding capability to remove vector stores from file search tool

* adding remove file to code interpreter

* review comment related change

---------

Co-authored-by: Marko Hietala
---
 .../azure/ai/client/models/_patch.py | 32 +++++++++++++++----
 ...ts_vector_store_batch_file_search_async.py | 19 +++++++++--
 ...e_agents_vector_store_batch_file_search.py | 18 ++++++++++-
 .../samples/agents/user_functions.py | 11 ++++---
 4 files changed, 65 insertions(+), 15 deletions(-)

diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
index fbc689a7b5cd..4146a03f4579 100644
--- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
+++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py
@@ -37,7 +37,7 @@
 )
 from abc import ABC, abstractmethod
-from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin
+from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin
 logger = logging.getLogger(__name__)
@@ -275,18 +275,22 @@ class FunctionTool(Tool):
 A tool that executes user-defined functions.
 """
- def __init__(self, functions: Dict[str, Any]):
+ def __init__(self, functions: Set[Callable[[], Any]]):
 """
 Initialize FunctionTool with a dictionary of functions.
- :param functions: A dictionary where keys are function names and values are the function objects.
+ :param functions: A set of function objects.
""" - self._functions = functions + self._functions = self._create_function_dict(functions) self._definitions = self._build_function_definitions(functions) - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: + def _create_function_dict(self, funcs: Set[Callable[[], Any]]) -> Dict[str, Callable[[], Any]]: + func_dict = {func.__name__: func for func in funcs} + return func_dict + + def _build_function_definitions(self, functions: Set[Callable[[], Any]]) -> List[ToolDefinition]: specs = [] - for name, func in functions.items(): + for func in functions: sig = inspect.signature(func) params = sig.parameters docstring = inspect.getdoc(func) @@ -299,7 +303,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef properties[param_name] = {"type": param_type, "description": param_description} function_def = FunctionDefinition( - name=name, + name=func.__name__, description=description, parameters={"type": "object", "properties": properties, "required": list(params.keys())}, ) @@ -386,6 +390,12 @@ def add_vector_store(self, store_id: str): """ self.vector_store_ids.append(store_id) + def remove_vector_store(self, store_id: str): + """ + Remove a vector store ID from the list of vector stores to search for files. + """ + self.vector_store_ids.remove(store_id) + @property def definitions(self) -> List[ToolDefinition]: """ @@ -420,6 +430,14 @@ def add_file(self, file_id: str): """ self.file_ids.append(file_id) + def remove_file(self, file_id: str): + """ + Remove a file ID to the list of files to interpret. + + :param file_id: The ID of the file to remove. + """ + self.file_ids.remove(file_id) + @property def definitions(self) -> List[ToolDefinition]: """ diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index c04468049742..63906d1e26e2 100644 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -77,6 +77,21 @@ async def main(): run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await ai_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources) + print(f"Updated agent, agent ID: {agent.id}") + + thread = await ai_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?") + print(f"Created message, message ID: {message.id}") + + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + await ai_client.agents.delete_file(file.id) print("Deleted file") @@ -87,8 +102,8 @@ async def main(): print("Deleted agent") messages = await ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") + print(f"Messages: {messages}") if __name__ == "__main__": - asyncio.run(main()) + asyncio.run(main()) \ No newline at end of file diff --git 
a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
index d40ecf5d0fe7..362fa5a53449 100644
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
+++ b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
@@ -8,7 +8,8 @@
 DESCRIPTION:
 This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
- the Azure Agents service using a synchronous client.
+ the Azure Agents service using a synchronous client. It also shows how to remove a vector store from the file search tool
+ and update the agent afterwards.
 USAGE:
 python sample_agents_vector_store_batch_file_search_async.py
@@ -75,6 +76,21 @@
 run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
 print(f"Created run, run ID: {run.id}")
+ file_search_tool.remove_vector_store(vector_store.id)
+ print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
+
+ ai_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources)
+ print(f"Updated agent, agent ID: {agent.id}")
+
+ thread = ai_client.agents.create_thread()
+ print(f"Created thread, thread ID: {thread.id}")
+
+ message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
+ print(f"Created message, message ID: {message.id}")
+
+ run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+ print(f"Created run, run ID: {run.id}")
+
 ai_client.agents.delete_file(file.id)
 print("Deleted file")
diff --git a/sdk/ai/azure-ai-client/samples/agents/user_functions.py b/sdk/ai/azure-ai-client/samples/agents/user_functions.py
index 8072b1b8a944..c1f9ef275751 100644
--- a/sdk/ai/azure-ai-client/samples/agents/user_functions.py
+++ b/sdk/ai/azure-ai-client/samples/agents/user_functions.py
@@ -5,6 +5,7 @@
 import json
 import datetime
+from typing import Any, Callable, Set
 # These are the user-defined functions that can be called by the agent.
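The hunk below completes this change: user_functions becomes a set of callables, matching the new FunctionTool signature that derives each tool name from the function's __name__. A rough usage sketch (assuming FunctionTool is exported from azure.ai.client.models, as in these samples):

    import json

    from azure.ai.client.models import FunctionTool

    def fetch_weather(location: str) -> str:
        """Return a canned forecast for the given location."""
        return json.dumps({"location": location, "forecast": "sunny"})

    # Register the callable itself; no separate name key is needed anymore.
    functions = FunctionTool({fetch_weather})
    print(functions.definitions[0]["function"]["name"])  # -> "fetch_weather"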
@@ -58,8 +59,8 @@ def send_email(recipient: str, subject: str, body: str) -> str: # Statically defined user functions for fast reference -user_functions = { - "fetch_current_datetime": fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email, -} +user_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email, +} \ No newline at end of file From 5eae9e6ac981fa36c1b02465df7385c4699b65b1 Mon Sep 17 00:00:00 2001 From: Sai Kothinti Date: Wed, 23 Oct 2024 12:21:15 +0530 Subject: [PATCH 047/138] add changes to latest typespec and ai-project changes (#38051) --- .../azure/ai/project/models/_models.py | 8 +- .../sample_evaluations_schedules.py | 104 +++++++++--------- sdk/ai/azure-ai-project/tsp-location.yaml | 2 +- 3 files changed, 60 insertions(+), 54 deletions(-) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py index 455ce2ffd760..45f9b650cfd1 100644 --- a/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py @@ -2865,7 +2865,7 @@ class RecurrenceTrigger(Trigger, discriminator="Recurrence"): :vartype frequency: str or ~azure.ai.project.models.Frequency :ivar interval: Specifies schedule interval in conjunction with frequency. Required. :vartype interval: int - :ivar schedule: The recurrence schedule. Required. + :ivar schedule: The recurrence schedule. :vartype schedule: ~azure.ai.project.models.RecurrenceSchedule """ @@ -2876,8 +2876,8 @@ class RecurrenceTrigger(Trigger, discriminator="Recurrence"): \"Hour\", and \"Minute\".""" interval: int = rest_field() """Specifies schedule interval in conjunction with frequency. Required.""" - schedule: "_models.RecurrenceSchedule" = rest_field() - """The recurrence schedule. Required.""" + schedule: Optional["_models.RecurrenceSchedule"] = rest_field() + """The recurrence schedule.""" @overload def __init__( @@ -2885,7 +2885,7 @@ def __init__( *, frequency: Union[str, "_models.Frequency"], interval: int, - schedule: "_models.RecurrenceSchedule", + schedule: Optional["_models.RecurrenceSchedule"] = None, ) -> None: ... 
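Since schedule is now optional, a RecurrenceTrigger can be built from frequency and interval alone; a sketch using one of the documented frequency values:

    trigger = RecurrenceTrigger(frequency="Day", interval=1)  # no RecurrenceSchedule attached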
@overload diff --git a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py index f436b149ab6d..48fb825aa98c 100644 --- a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py @@ -1,69 +1,75 @@ from azure.ai.project import AIProjectClient -from azure.identity import DefaultAzureCredential -from azure.ai.project.models import ( - AppInsightsConfiguration, - EvaluatorConfiguration, - SamplingStrategy, - EvaluationSchedule, - CronTrigger, -) - +from azure.identity import DefaultAzureCredential +from azure.ai.project.models import ApplicationInsightsConfiguration, EvaluatorConfiguration, SamplingStrategy, EvaluationSchedule, CronTrigger, RecurrenceTrigger, Frequency, RecurrenceSchedule + def main(): - app_insights_config = AppInsightsConfiguration( - resource_id="sample_id", query="your_connection_string", service_name="sample_service_name" - ) + # Project Configuration Canary + Subscription = "72c03bf3-4e69-41af-9532-dfcdc3eefef4" + ResourceGroup = "apeddau-rg-westus2" + Workspace = "apeddau-canay-ws-eastus2euap" + Endpoint = "eastus2euap.api.azureml.ms" - f1_evaluator_config = EvaluatorConfiguration( - id="azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1" - ) - - custom_relevance_evaluator_config = EvaluatorConfiguration( - id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2", - init_params={"param3": "value3", "param4": "value4"}, - data_mapping={"data3": "value3", "data4": "value4"}, + # Create an Azure AI client + ai_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", + logging_enable=True, # Optional. Remove this line if you don't want to show how to enable logging ) - cron_expression = "0 0 0 1/1 * ? 
*" - cron_trigger = CronTrigger(expression=cron_expression) - evaluators = {"f1_score": f1_evaluator_config, "relevance": custom_relevance_evaluator_config} + # Sample for creating an evaluation schedule with recurrence trigger of daily frequency + app_insights_config = ApplicationInsightsConfiguration( + resource_id="/subscriptions/72c03bf3-4e69-41af-9532-dfcdc3eefef4/resourceGroups/apeddau-rg-centraluseuap/providers/Microsoft.insights/components/apeddauwscentr0026977484", + query="traces | where message contains \"\"", + service_name="sample_service_name" + ) + + f1_evaluator_config = EvaluatorConfiguration( + id="azureml://registries/model-evaluation-dev-01/models/F1ScoreEval/versions/1", + init_params={ + "column_mapping": { + "response": "${data.message}", + "ground_truth": "${data.itemType}" + } + } + ) + + recurrence_trigger = RecurrenceTrigger(frequency="daily", interval=1) + evaluators = { + "f1_score": f1_evaluator_config, + } + sampling_strategy = SamplingStrategy(rate=0.2) - display_name = "Sample Online Evaluation Schedule" - description = "Sample Online Evaluation Schedule Description" + name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104" + description = "Testing Online eval command job in CANARY environment" tags = {"tag1": "value1", "tag2": "value2"} - properties = {"property1": "value1", "property2": "value2"} - + properties = {"Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1"} + evaluation_schedule = EvaluationSchedule( data=app_insights_config, evaluators=evaluators, - trigger=cron_trigger, + trigger=recurrence_trigger, sampling_strategy=sampling_strategy, - display_name=display_name, description=description, tags=tags, - properties=properties, + properties=properties ) - # Project Configuration - Subscription = "" - ResourceGroup = "" - Workspace = "" - Endpoint = "" - client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}", - logging_enable=True, - ) - client.evaluations - evaluation_schedule = client.evaluations.create_or_replace_schedule( - id="sample_schedule_id", resource=evaluation_schedule - ) - client.evaluations.get_schedule(evaluation_schedule.id) - client.evaluations.list_schedule() - client.evaluations.list() - client.evaluations.delete_schedule(evaluation_schedule.id) + evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule) + print(evaluation_schedule.provisioning_status) + print(evaluation_schedule) + # Sample for get an evaluation schedule with name + evaluation_schedule = ai_client.evaluations.get_schedule(name) + print(evaluation_schedule) + # Sample for list evaluation schedules + for evaluation_schedule in ai_client.evaluations.list_schedule(): + print(evaluation_schedule) + + # Sample for delete an evaluation schedule with name + ai_client.evaluations.delete_schedule(name) + if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/sdk/ai/azure-ai-project/tsp-location.yaml b/sdk/ai/azure-ai-project/tsp-location.yaml index 588cfb7e08b3..069a46a40fd5 100644 --- a/sdk/ai/azure-ai-project/tsp-location.yaml +++ b/sdk/ai/azure-ai-project/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Project -commit: 271f7fac192ac0d0c94a531b98aa9d94b7816d4c +commit: 1fbee44ffdb76894e51943754f374cb210f75e11 repo: Azure/azure-rest-api-specs additionalDirectories: From 1282a641430a77ff5f5fcef7bf89e097ffcc870e Mon Sep 17 00:00:00 2001 
From: nick863 <30440255+nick863@users.noreply.github.com> Date: Wed, 23 Oct 2024 10:09:46 -0700 Subject: [PATCH 048/138] Move agents test to agents folder (#38055) * Move agents test to agents folder * Copy changes to project --- .gitignore | 5 + .../azure/ai/client/models/_patch.py | 1 - .../tests/agents/test_deserialization.py | 92 +++++++++++++++++++ .../tests/endpoints/unit_tests.py | 84 ----------------- .../tests/agents/test_deserialization.py | 92 +++++++++++++++++++ .../tests/endpoints/unit_tests.py | 84 ----------------- 6 files changed, 189 insertions(+), 169 deletions(-) create mode 100644 sdk/ai/azure-ai-client/tests/agents/test_deserialization.py create mode 100644 sdk/ai/azure-ai-project/tests/agents/test_deserialization.py diff --git a/.gitignore b/.gitignore index c7da60c33609..1181f6c3eee2 100644 --- a/.gitignore +++ b/.gitignore @@ -158,3 +158,8 @@ sdk/cosmos/azure-cosmos/test/test_config.py # temporary folder to refresh SDK with cadl TempTypeSpecFiles/ + +# Eclipse project files +**/.project +**/.pydevproject +**/.settings \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py index 4146a03f4579..82bbce9de88c 100644 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py @@ -33,7 +33,6 @@ CodeInterpreterToolDefinition, CodeInterpreterToolResource, RequiredFunctionToolCall, - ConnectionType, ) from abc import ABC, abstractmethod diff --git a/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py new file mode 100644 index 000000000000..d42b1db75120 --- /dev/null +++ b/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py @@ -0,0 +1,92 @@ +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import copy
+import datetime
+import pytest
+
+from azure.ai.client.models._models import ThreadRun, RunStep, ThreadMessage
+from azure.ai.client.models._patch import _safe_instantiate, _filter_parameters
+
+class TestDeserialization:
+ """Tests for deserialization of SSE responses."""
+
+ @pytest.mark.parametrize(
+ "valid_params,model_cls",
+ [
+ (
+ {
+ "id": "12345",
+ "object": "thread.run",
+ "thread_id": "6789",
+ "assistant_id": "101112",
+ "status": "in_progress",
+ "required_action": "test",
+ "last_error": "none",
+ "model": "gpt-4",
+ "instructions": "Test instruction",
+ "tools": "Test function",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "expires_at": datetime.datetime(2024, 11, 17),
+ "started_at": datetime.datetime(2024, 11, 15),
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "cancelled_at": datetime.datetime(2024, 11, 16),
+ "failed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_details": "max_completion_tokens",
+ "usage": "in_progress",
+ "temperature": 1.0,
+ "top_p": 1.0,
+ "max_completion_tokens": 1000,
+ "truncation_strategy": "test",
+ "tool_choice": "tool name",
+ "response_format": "json",
+ "metadata": {"foo": "bar"},
+ "tool_resources": "test",
+ "parallel_tool_calls": True,
+ },
+ ThreadRun,
+ ),
+ (
+ {
+ "id": "1233",
+ "object": "thread.message",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "thread_id": "5678",
+ "status": "incomplete",
+ "incomplete_details": "test",
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_at": datetime.datetime(2024, 11, 16),
+ "role": "assistant",
+ "content": "Test",
+ "assistant_id": "9911",
+ "run_id": "11",
+ "attachments": ["4", "8", "15", "16", "23", "42"],
+ "metadata": {"foo", "bar"},
+ },
+ ThreadMessage,
+ ),
+ ],
+ )
+ def test_correct_thread_params(self, valid_params, model_cls):
+ """Test that if the service returns an extra parameter in an SSE response, it does not create issues."""
+
+ bad_params = {"foo": "bar"}
+ params = copy.deepcopy(valid_params)
+ params.update(bad_params)
+ # We should not be able to create a Thread Run with bad parameters.
+ with pytest.raises(TypeError):
+ model_cls(**params)
+ filtered_params = _filter_parameters(model_cls, params)
+ for k in valid_params:
+ assert k in filtered_params
+ for k in bad_params:
+ assert k not in filtered_params
+ # Implicitly check that we can create object with the filtered parameters.
+ model_cls(**filtered_params)
+ # Check safe initialization.
+ assert isinstance(_safe_instantiate(model_cls, params), model_cls)
+
+ def test_safe_instantiate_non_dict(self):
+ """Test the safe_instantiate method when the user supplies a non-dictionary."""
+ assert _safe_instantiate(RunStep, 42) == 42
diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
index 30862aa8c879..341224aae4e5 100644
--- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
+++ b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
@@ -2,17 +2,12 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
# ------------------------------------ -import copy import sys import logging import datetime -import pytest from azure.ai.client.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError -from azure.ai.client.models._models import ThreadRun, RunStep, ThreadMessage -from azure.ai.client.models._patch import _safe_instantiate, _filter_parameters - # import azure.ai.client as sdk @@ -117,82 +112,3 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc - - @pytest.mark.parametrize( - "valid_params,model_cls", - [ - ( - { - "id": "12345", - "object": "thread.run", - "thread_id": "6789", - "assistant_id": "101112", - "status": "in_progress", - "required_action": "test", - "last_error": "none", - "model": "gpt-4", - "instructions": "Test instruction", - "tools": "Test function", - "created_at": datetime.datetime(2024, 11, 14), - "expires_at": datetime.datetime(2024, 11, 17), - "started_at": datetime.datetime(2024, 11, 15), - "completed_at": datetime.datetime(2024, 11, 16), - "cancelled_at": datetime.datetime(2024, 11, 16), - "failed_at": datetime.datetime(2024, 11, 16), - "incomplete_details": "max_completion_tokens", - "usage": "in_progress", - "temperature": 1.0, - "top_p": 1.0, - "max_completion_tokens": 1000, - "truncation_strategy": "test", - "tool_choice": "tool name", - "response_format": "json", - "metadata": {"foo": "bar"}, - "tool_resources": "test", - "parallel_tool_calls": True, - }, - ThreadRun, - ), - ( - { - "id": "1233", - "object": "thread.message", - "created_at": datetime.datetime(2024, 11, 14), - "thread_id": "5678", - "status": "incomplete", - "incomplete_details": "test", - "completed_at": datetime.datetime(2024, 11, 16), - "incomplete_at": datetime.datetime(2024, 11, 16), - "role": "assistant", - "content": "Test", - "assistant_id": "9911", - "run_id": "11", - "attachments": ["4", "8", "15", "16", "23", "42"], - "metadata": {"foo", "bar"}, - }, - ThreadMessage, - ), - ], - ) - def test_correct_thread_params(self, valid_params, model_cls): - """Test that if service returned extra parameter in SSE response, it does not create issues.""" - - bad_params = {"foo": "bar"} - params = copy.deepcopy(valid_params) - params.update(bad_params) - # We should bot e able to create Thread Run with bad parameters. - with pytest.raises(TypeError): - model_cls(**params) - filtered_params = _filter_parameters(model_cls, params) - for k in valid_params: - assert k in filtered_params - for k in bad_params: - assert k not in filtered_params - # Implicitly check that we can create object with the filtered parameters. - model_cls(**filtered_params) - # Check safe initialization. - assert isinstance(_safe_instantiate(model_cls, params), model_cls) - - def test_safe_instantiate_non_dict(self): - """Test that safe_instantiate method when user supplies not a dictionary.""" - assert _safe_instantiate(RunStep, 42) == 42 diff --git a/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py new file mode 100644 index 000000000000..afb2c5de9501 --- /dev/null +++ b/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py @@ -0,0 +1,92 @@ +# # ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import copy
+import datetime
+import pytest
+
+from azure.ai.project.models._models import ThreadRun, RunStep, ThreadMessage
+from azure.ai.project.models._patch import _safe_instantiate, _filter_parameters
+
+class TestDeserialization:
+ """Tests for deserialization of SSE responses."""
+
+ @pytest.mark.parametrize(
+ "valid_params,model_cls",
+ [
+ (
+ {
+ "id": "12345",
+ "object": "thread.run",
+ "thread_id": "6789",
+ "assistant_id": "101112",
+ "status": "in_progress",
+ "required_action": "test",
+ "last_error": "none",
+ "model": "gpt-4",
+ "instructions": "Test instruction",
+ "tools": "Test function",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "expires_at": datetime.datetime(2024, 11, 17),
+ "started_at": datetime.datetime(2024, 11, 15),
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "cancelled_at": datetime.datetime(2024, 11, 16),
+ "failed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_details": "max_completion_tokens",
+ "usage": "in_progress",
+ "temperature": 1.0,
+ "top_p": 1.0,
+ "max_completion_tokens": 1000,
+ "truncation_strategy": "test",
+ "tool_choice": "tool name",
+ "response_format": "json",
+ "metadata": {"foo": "bar"},
+ "tool_resources": "test",
+ "parallel_tool_calls": True,
+ },
+ ThreadRun,
+ ),
+ (
+ {
+ "id": "1233",
+ "object": "thread.message",
+ "created_at": datetime.datetime(2024, 11, 14),
+ "thread_id": "5678",
+ "status": "incomplete",
+ "incomplete_details": "test",
+ "completed_at": datetime.datetime(2024, 11, 16),
+ "incomplete_at": datetime.datetime(2024, 11, 16),
+ "role": "assistant",
+ "content": "Test",
+ "assistant_id": "9911",
+ "run_id": "11",
+ "attachments": ["4", "8", "15", "16", "23", "42"],
+ "metadata": {"foo", "bar"},
+ },
+ ThreadMessage,
+ ),
+ ],
+ )
+ def test_correct_thread_params(self, valid_params, model_cls):
+ """Test that if the service returns an extra parameter in an SSE response, it does not create issues."""
+
+ bad_params = {"foo": "bar"}
+ params = copy.deepcopy(valid_params)
+ params.update(bad_params)
+ # We should not be able to create a Thread Run with bad parameters.
+ with pytest.raises(TypeError):
+ model_cls(**params)
+ filtered_params = _filter_parameters(model_cls, params)
+ for k in valid_params:
+ assert k in filtered_params
+ for k in bad_params:
+ assert k not in filtered_params
+ # Implicitly check that we can create object with the filtered parameters.
+ model_cls(**filtered_params)
+ # Check safe initialization.
+ assert isinstance(_safe_instantiate(model_cls, params), model_cls)
+
+ def test_safe_instantiate_non_dict(self):
+ """Test the safe_instantiate method when the user supplies a non-dictionary."""
+ assert _safe_instantiate(RunStep, 42) == 42
diff --git a/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py
index 417a305ccd89..af0cd871de97 100644
--- a/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py
+++ b/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py
@@ -2,17 +2,12 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
# ------------------------------------ -import copy import sys import logging import datetime -import pytest from azure.ai.project.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError -from azure.ai.project.models._models import ThreadRun, RunStep, ThreadMessage -from azure.ai.project.models._patch import _safe_instantiate, _filter_parameters - # import azure.ai.project as sdk @@ -117,82 +112,3 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc - - @pytest.mark.parametrize( - "valid_params,model_cls", - [ - ( - { - "id": "12345", - "object": "thread.run", - "thread_id": "6789", - "assistant_id": "101112", - "status": "in_progress", - "required_action": "test", - "last_error": "none", - "model": "gpt-4", - "instructions": "Test instruction", - "tools": "Test function", - "created_at": datetime.datetime(2024, 11, 14), - "expires_at": datetime.datetime(2024, 11, 17), - "started_at": datetime.datetime(2024, 11, 15), - "completed_at": datetime.datetime(2024, 11, 16), - "cancelled_at": datetime.datetime(2024, 11, 16), - "failed_at": datetime.datetime(2024, 11, 16), - "incomplete_details": "max_completion_tokens", - "usage": "in_progress", - "temperature": 1.0, - "top_p": 1.0, - "max_completion_tokens": 1000, - "truncation_strategy": "test", - "tool_choice": "tool name", - "response_format": "json", - "metadata": {"foo": "bar"}, - "tool_resources": "test", - "parallel_tool_calls": True, - }, - ThreadRun, - ), - ( - { - "id": "1233", - "object": "thread.message", - "created_at": datetime.datetime(2024, 11, 14), - "thread_id": "5678", - "status": "incomplete", - "incomplete_details": "test", - "completed_at": datetime.datetime(2024, 11, 16), - "incomplete_at": datetime.datetime(2024, 11, 16), - "role": "assistant", - "content": "Test", - "assistant_id": "9911", - "run_id": "11", - "attachments": ["4", "8", "15", "16", "23", "42"], - "metadata": {"foo", "bar"}, - }, - ThreadMessage, - ), - ], - ) - def test_correct_thread_params(self, valid_params, model_cls): - """Test that if service returned extra parameter in SSE response, it does not create issues.""" - - bad_params = {"foo": "bar"} - params = copy.deepcopy(valid_params) - params.update(bad_params) - # We should bot e able to create Thread Run with bad parameters. - with pytest.raises(TypeError): - model_cls(**params) - filtered_params = _filter_parameters(model_cls, params) - for k in valid_params: - assert k in filtered_params - for k in bad_params: - assert k not in filtered_params - # Implicitly check that we can create object with the filtered parameters. - model_cls(**filtered_params) - # Check safe initialization. - assert isinstance(_safe_instantiate(model_cls, params), model_cls) - - def test_safe_instantiate_non_dict(self): - """Test that safe_instantiate method when user supplies not a dictionary.""" - assert _safe_instantiate(RunStep, 42) == 42 From 4e950c798b78e0f34e0e723d0eb0b186461e20ea Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 23 Oct 2024 13:25:24 -0700 Subject: [PATCH 049/138] Prep for connection tests. 
(#38060) --- .../azure/ai/project/models/_patch.py | 4 +- sdk/ai/azure-ai-project/tests/README.md | 47 ++++++++----------- .../tests/agents/test_agents_client.py | 12 ++--- .../tests/connections/connection_test_base.py | 40 ++++++++++++++++ .../tests/connections/test_connections.py | 27 +++++++++++ .../test_connections_unit_tests.py} | 22 ++------- 6 files changed, 98 insertions(+), 54 deletions(-) create mode 100644 sdk/ai/azure-ai-project/tests/connections/connection_test_base.py create mode 100644 sdk/ai/azure-ai-project/tests/connections/test_connections.py rename sdk/ai/azure-ai-project/tests/{endpoints/unit_tests.py => connections/test_connections_unit_tests.py} (91%) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py index 2210357144e0..8bcafd3cf937 100644 --- a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py +++ b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py @@ -189,13 +189,13 @@ def _refresh_token(self) -> None: project_client = AIProjectClient( credential=self._credential, - endpoint="not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. + endpoint="not-needed", # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. subscription_id=self._subscription_id, resource_group_name=self._resource_group_name, project_name=self._project_name, ) - connection = project_client.endpoints.get(connection_name=self._connection_name, populate_secrets=True) + connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True) self._sas_token = connection.properties.credentials.sas self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) diff --git a/sdk/ai/azure-ai-project/tests/README.md b/sdk/ai/azure-ai-project/tests/README.md index 76d76aee9c52..248eb90b98fc 100644 --- a/sdk/ai/azure-ai-project/tests/README.md +++ b/sdk/ai/azure-ai-project/tests/README.md @@ -1,22 +1,14 @@ -# copied from azure-ai-inference TODO update - -# Azure AI client library tests for Python +# Azure AI Project client library tests for Python The instructions below are for running tests locally, on a Windows machine, against the live service. -## Prerequisites - -The live tests were written against the AI models mentioned below. You will need to deploy a gpt-4o model in the Azure OpenAI Studio, and have the endpoint and key for it: - -- `gpt-4o` on Azure OpenAI (AOAI), for Agents tests - -## Setup +## Build and install the client library - Clone or download this sample repository. -- Open a command prompt window in the folder `sdk\ai\azure-ai-client`. +- Open a command prompt window in the folder `sdk\ai\azure-ai-project` - If you want to run tests against the latest published client library, install it by running: ```bash - pip install azure-ai-client + pip install azure-ai-project ``` - If you want to run tests against a locally built client library: - First build the wheel: @@ -25,29 +17,30 @@ The live tests were written against the AI models mentioned below. 
You will need pip install -r dev_requirements.txt python setup.py bdist_wheel ``` - - Then install the resulting local wheel (update version `1.0.0b2` to the current one): + - Then install the resulting local wheel (update version `1.0.0b1` to the current one): ```bash - pip install dist\azure_ai_client-1.0.0b1-py3-none-any.whl --user --force-reinstall + pip install dist\azure_ai_project-1.0.0b1-py3-none-any.whl --user --force-reinstall ``` -## Set environment variables - -Here is the list of environment variables used by the tests: +## Setup for running tests in the `agents` folder ```bash -# For agents, including tools -set AZURE_AI_CLIENT_AGENTS_CONNECTION_STRING= +set PROJECT_CONNECTION_STRING_AGENTS_TESTS= ``` - ## Configure test proxy @@ -68,10 +61,10 @@ To run all tests, type: pytest ``` -For windows run: +To run tests in a particular folder (`tests\connections` for example): ```bash -python -m pytest tests\agents +python -m pytest tests\connections ``` ## Additional information diff --git a/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py index d271d9588b99..2d057e90da5d 100644 --- a/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py @@ -45,17 +45,17 @@ agentClientPreparer = functools.partial( EnvironmentVariableLoader, - "azure_ai_client", + "azure_ai_project", project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", ) """ agentClientPreparer = functools.partial( EnvironmentVariableLoader, - 'azure_ai_client', - azure_ai_client_host_name="https://foo.bar.some-domain.ms", - azure_ai_client_subscription_id="00000000-0000-0000-0000-000000000000", - azure_ai_client_resource_group_name="rg-resour-cegr-oupfoo1", - azure_ai_client_workspace_name="abcd-abcdabcdabcda-abcdefghijklm", + 'azure_ai_project', + azure_ai_project_host_name="https://foo.bar.some-domain.ms", + azure_ai_project_subscription_id="00000000-0000-0000-0000-000000000000", + azure_ai_project_resource_group_name="rg-resour-cegr-oupfoo1", + azure_ai_project_workspace_name="abcd-abcdabcdabcda-abcdefghijklm", ) """ diff --git a/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py new file mode 100644 index 000000000000..d2b4177f8a1e --- /dev/null +++ b/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py @@ -0,0 +1,40 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License.
+# ------------------------------------ +import sys +import logging +import functools +from azure.ai.project import AIProjectClient +from azure.identity import DefaultAzureCredential +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader + +ServicePreparerChatCompletions = functools.partial( + EnvironmentVariableLoader, + "project_connection_string", + project_connection_string_connections_tests="endpoint;azure-subscription-id;azure-rg-name;ai-studio-hub-name", +) + +# Set to True to enable SDK logging +LOGGING_ENABLED = True + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + +class ConnectionsTestBase: + + def get_sync_client(self, **kwargs) -> AIProjectClient: + conn_str = kwargs.pop("project_connection_string_connections_tests") + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=conn_str, + ) + return project_client + diff --git a/sdk/ai/azure-ai-project/tests/connections/test_connections.py b/sdk/ai/azure-ai-project/tests/connections/test_connections.py new file mode 100644 index 000000000000..b24558903416 --- /dev/null +++ b/sdk/ai/azure-ai-project/tests/connections/test_connections.py @@ -0,0 +1,27 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import sys +import logging +import datetime + +from azure.ai.project.models import SASTokenCredential +from azure.core.credentials import TokenCredential, AccessToken +from azure.core.exceptions import HttpResponseError + +from connection_test_base import ConnectionsTestBase + + +# The test class name needs to start with "Test" to get collected by pytest +class TestConnections(ConnectionsTestBase): + + def test_get_connection(self, **kwargs): + project_client = self.get_sync_client(**kwargs) + pass + + def test_get_default_connection(self, **kwargs): + pass + + def test_list_connections(self, **kwargs): + pass \ No newline at end of file diff --git a/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py similarity index 91% rename from sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py rename to sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py index af0cd871de97..9199a91067f0 100644 --- a/sdk/ai/azure-ai-project/tests/endpoints/unit_tests.py +++ b/sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py @@ -2,28 +2,11 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -import sys -import logging import datetime from azure.ai.project.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError - -# import azure.ai.project as sdk - -# Set to True to enable SDK logging -LOGGING_ENABLED = True - -if LOGGING_ENABLED: - # Create a logger for the 'azure' SDK - # See https://docs.python.org/3/library/logging.html - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) # INFO or DEBUG - - # Configure a console output - handler = logging.StreamHandler(stream=sys.stdout) - logger.addHandler(handler) - +from connection_test_base import ConnectionsTestBase class FakeTokenCredential(TokenCredential): def get_token(self, *scopes, **kwargs): @@ -34,7 +17,7 @@ def get_token(self, *scopes, **kwargs): # The test class name needs to start with "Test" to get collected by pytest -class TestUnit: +class TestConnectionsUnitTests(ConnectionsTestBase): # ********************************************************************************** # @@ -112,3 +95,4 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc + From dfbaaf5f2a25b9e63c559475ff4093473f05a1b0 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 23 Oct 2024 14:25:10 -0700 Subject: [PATCH 050/138] Create azure-ai-projects folder --- sdk/ai/azure-ai-projects/CHANGELOG.md | 5 + sdk/ai/azure-ai-projects/LICENSE | 21 + sdk/ai/azure-ai-projects/MANIFEST.in | 7 + sdk/ai/azure-ai-projects/README.md | 80 + sdk/ai/azure-ai-projects/azure/__init__.py | 1 + sdk/ai/azure-ai-projects/azure/ai/__init__.py | 1 + .../azure/ai/projects/__init__.py | 32 + .../azure/ai/projects/_client.py | 137 + .../azure/ai/projects/_configuration.py | 91 + .../azure/ai/projects/_model_base.py | 1159 +++ .../azure/ai/projects/_patch.py | 246 + .../azure/ai/projects/_serialization.py | 2114 +++++ .../azure/ai/projects/_types.py | 18 + .../azure/ai/projects/_vendor.py | 50 + .../azure/ai/projects/_version.py | 9 + .../azure/ai/projects/aio/__init__.py | 29 + .../azure/ai/projects/aio/_client.py | 139 + .../azure/ai/projects/aio/_configuration.py | 91 + .../azure/ai/projects/aio/_patch.py | 200 + .../ai/projects/aio/operations/__init__.py | 29 + .../ai/projects/aio/operations/_operations.py | 6049 ++++++++++++++ .../ai/projects/aio/operations/_patch.py | 1977 +++++ .../azure/ai/projects/models/__init__.py | 376 + .../azure/ai/projects/models/_enums.py | 513 ++ .../azure/ai/projects/models/_models.py | 6106 ++++++++++++++ .../azure/ai/projects/models/_patch.py | 997 +++ .../azure/ai/projects/operations/__init__.py | 29 + .../ai/projects/operations/_operations.py | 7396 +++++++++++++++++ .../azure/ai/projects/operations/_patch.py | 1982 +++++ .../azure/ai/projects/py.typed | 1 + sdk/ai/azure-ai-projects/dev_requirements.txt | 4 + .../sample_agents_basics_async.py | 76 + .../sample_agents_functions_async.py | 117 + ...sample_agents_stream_eventhandler_async.py | 96 + ..._stream_eventhandler_with_toolset_async.py | 111 + .../sample_agents_stream_iteration_async.py | 92 + ...ts_vector_store_batch_file_search_async.py | 94 + ...gents_with_file_search_attachment_async.py | 83 + .../async_samples/user_async_functions.py | 29 + .../samples/agents/product_info_1.md | 51 + .../samples/agents/sample_agents_basics.py | 63 + 
...mple_agents_code_interpreter_attachment.py | 80 + .../agents/sample_agents_file_search.py | 87 + .../samples/agents/sample_agents_functions.py | 105 + .../agents/sample_agents_run_with_toolset.py | 80 + .../sample_agents_stream_eventhandler.py | 98 + ...ents_stream_eventhandler_with_functions.py | 132 + ...agents_stream_eventhandler_with_toolset.py | 109 + .../agents/sample_agents_stream_iteration.py | 92 + ...le_agents_stream_iteration_with_toolset.py | 122 + ...e_agents_vector_store_batch_file_search.py | 88 + ...mple_agents_with_file_search_attachment.py | 75 + .../samples/agents/user_functions.py | 65 + .../async_samples/sample_connections_async.py | 142 + .../samples/connections/sample_connections.py | 123 + .../evaluations/evaluate_test_data.jsonl | 3 + .../samples/evaluations/sample_evaluations.py | 88 + .../sample_evaluations_schedules.py | 75 + .../sample_get_azure_openai_client_async.py | 57 + ...ample_get_chat_completions_client_async.py | 49 + .../sample_get_embeddings_client_async.py | 54 + .../sample_get_azure_openai_client.py | 45 + .../sample_get_chat_completions_client.py | 38 + .../inference/sample_get_embeddings_client.py | 42 + sdk/ai/azure-ai-projects/setup.py | 71 + sdk/ai/azure-ai-projects/tests/README.md | 72 + .../tests/agents/test_agents_client.py | 1119 +++ .../tests/agents/test_deserialization.py | 92 + sdk/ai/azure-ai-projects/tests/conftest.py | 20 + .../tests/connections/connection_test_base.py | 40 + .../tests/connections/test_connections.py | 27 + .../test_connections_unit_tests.py | 98 + sdk/ai/azure-ai-projects/tsp-location.yaml | 4 + 73 files changed, 34093 insertions(+) create mode 100644 sdk/ai/azure-ai-projects/CHANGELOG.md create mode 100644 sdk/ai/azure-ai-projects/LICENSE create mode 100644 sdk/ai/azure-ai-projects/MANIFEST.in create mode 100644 sdk/ai/azure-ai-projects/README.md create mode 100644 sdk/ai/azure-ai-projects/azure/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_client.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_configuration.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_types.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_vendor.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/_version.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/_configuration.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py create mode 100644 
sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/py.typed create mode 100644 sdk/ai/azure-ai-projects/dev_requirements.txt create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/product_info_1.md create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/user_functions.py create mode 100644 sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/connections/sample_connections.py create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py create mode 100644 
sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py create mode 100644 sdk/ai/azure-ai-projects/setup.py create mode 100644 sdk/ai/azure-ai-projects/tests/README.md create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py create mode 100644 sdk/ai/azure-ai-projects/tests/conftest.py create mode 100644 sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py create mode 100644 sdk/ai/azure-ai-projects/tests/connections/test_connections.py create mode 100644 sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py create mode 100644 sdk/ai/azure-ai-projects/tsp-location.yaml diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md new file mode 100644 index 000000000000..628743d283a9 --- /dev/null +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -0,0 +1,5 @@ +# Release History + +## 1.0.0b1 (1970-01-01) + +- Initial version diff --git a/sdk/ai/azure-ai-projects/LICENSE b/sdk/ai/azure-ai-projects/LICENSE new file mode 100644 index 000000000000..63447fd8bbbf --- /dev/null +++ b/sdk/ai/azure-ai-projects/LICENSE @@ -0,0 +1,21 @@ +Copyright (c) Microsoft Corporation. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/MANIFEST.in b/sdk/ai/azure-ai-projects/MANIFEST.in new file mode 100644 index 000000000000..aee9f8c1ccc3 --- /dev/null +++ b/sdk/ai/azure-ai-projects/MANIFEST.in @@ -0,0 +1,7 @@ +include *.md +include LICENSE +include azure/ai/projects/py.typed +recursive-include tests *.py +recursive-include samples *.py *.md +include azure/__init__.py +include azure/ai/__init__.py \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md new file mode 100644 index 000000000000..3de22e548143 --- /dev/null +++ b/sdk/ai/azure-ai-projects/README.md @@ -0,0 +1,80 @@ + + +# Azure AI Projects client library for Python + + +## Getting started + +### Install the package + +```bash +python -m pip install azure-ai-projects +``` + +#### Prerequisites + +- Python 3.8 or later is required to use this package. +- You need an [Azure subscription][azure_sub] to use this package. +- An existing Azure AI Projects instance.
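+Once the package is installed, a quick sanity check is to import it and print its version (the package exposes `__version__`; the value in the comment below is illustrative): + +```python +# Minimal install check: import the package and print the version it reports. +import azure.ai.projects + +print(azure.ai.projects.__version__) # e.g. 1.0.0b1 +``` +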
+#### Create with an Azure Active Directory Credential +To use an [Azure Active Directory (AAD) token credential][authenticate_with_token], +provide an instance of the desired credential type obtained from the +[azure-identity][azure_identity_credentials] library. + +To authenticate with AAD, you must first [pip][pip] install [`azure-identity`][azure_identity_pip]. + +After setup, you can choose which type of [credential][azure_identity_credentials] from azure.identity to use. +As an example, [DefaultAzureCredential][default_azure_credential] can be used to authenticate the client: + +Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: +`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET` + +Use the returned token credential to authenticate the client: + +```python +>>> from azure.ai.projects import AIProjectClient +>>> from azure.identity import DefaultAzureCredential +>>> client = AIProjectClient(endpoint='<endpoint>', subscription_id='<subscription-id>', resource_group_name='<resource-group>', project_name='<project-name>', credential=DefaultAzureCredential()) +``` + +## Examples + +```python +>>> from azure.ai.projects import AIProjectClient +>>> from azure.identity import DefaultAzureCredential +>>> from azure.core.exceptions import HttpResponseError + +>>> client = AIProjectClient(endpoint='<endpoint>', subscription_id='<subscription-id>', resource_group_name='<resource-group>', project_name='<project-name>', credential=DefaultAzureCredential()) +>>> try: + # Call a service operation here, for example fetch a connection and its credentials: + client.connections.get(connection_name='<connection-name>', with_credentials=True) + except HttpResponseError as e: + print('service responded with an error: {}'.format(e.response.json())) + +``` + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments.
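+## Additional example + +The tests in this repository create the client from a single project connection string instead of the individual endpoint parameters shown above. Below is a minimal sketch, assuming the `from_connection_string` factory and the `connections.get` keyword arguments used by those tests; all angle-bracketed values are placeholders, and the keyword names may change between preview versions, so check `samples/connections` for the authoritative usage: + +```python +# Sketch: build a client from a project connection string, then fetch one +# connection including its credentials (e.g. a SAS token). +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + # Placeholder connection string: "<endpoint>;<subscription-id>;<resource-group>;<project-name>" + conn_str="<region>.api.azureml.ms;<subscription-id>;<resource-group>;<project-name>", +) + +connection = project_client.connections.get( + connection_name="<connection-name>", # placeholder + with_credentials=True, # also return secrets, such as a SAS token +) +print(connection) +``` +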
+ + +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials +[azure_identity_pip]: https://pypi.org/project/azure-identity/ +[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential +[pip]: https://pypi.org/project/pip/ +[azure_sub]: https://azure.microsoft.com/free/ + diff --git a/sdk/ai/azure-ai-projects/azure/__init__.py b/sdk/ai/azure-ai-projects/azure/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/__init__.py new file mode 100644 index 000000000000..743119593f69 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._client import AIProjectClient # type: ignore +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AIProjectClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore + +_patch_sdk() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py new file mode 100644 index 000000000000..971602b90fb4 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import AIProjectClientConfiguration +from ._serialization import Deserializer, Serializer +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + + +class AIProjectClient: + """AIProjectClient. + + :ivar agents: AgentsOperations operations + :vartype agents: azure.ai.projects.operations.AgentsOperations + :ivar connections: ConnectionsOperations operations + :vartype connections: azure.ai.projects.operations.ConnectionsOperations + :ivar evaluations: EvaluationsOperations operations + :vartype evaluations: azure.ai.projects.operations.EvaluationsOperations + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\ , where + :code:`<region>` is the Azure region where the project is deployed (e.g. westus) and + :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required. + :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. + :type resource_group_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + self._config = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_configuration.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_configuration.py new file mode 100644 index 000000000000..6da93d003386 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_configuration.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline import policies + +from ._version import VERSION + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + + +class AIProjectClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for AIProjectClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param endpoint: The Azure AI Studio project endpoint, in the form + ``https://<region>.api.azureml.ms`` or + ``https://<private-link-guid>.<region>.api.azureml.ms``\\ , where :code:`<region>` + is the Azure region where the project is deployed (e.g. westus) and :code:`<private-link-guid>` + is the GUID of the Enterprise private link. Required. + :type endpoint: str + :param subscription_id: The Azure subscription ID. Required. + :type subscription_id: str + :param resource_group_name: The name of the Azure Resource Group. Required. + :type resource_group_name: str + :param project_name: The Azure AI Studio project name. Required. + :type project_name: str + :param credential: Credential used to authenticate requests to the service. Required. + :type credential: ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-07-01-preview". Note that overriding this default value may result in unsupported + behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "TokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-projects/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py new file mode 100644 index 000000000000..e6a2730f9276 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_model_base.py @@ -0,0 +1,1159 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON 
encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. 
+ :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + 
def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + 
# label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
+    if isinstance(annotation, (str, typing.ForwardRef)):
+        try:
+            model_name = annotation.__forward_arg__  # type: ignore
+        except AttributeError:
+            model_name = annotation
+        if module is not None:
+            annotation = _get_model(module, model_name)
+
+    try:
+        if module and _is_model(annotation):
+            if rf:
+                rf._is_model = True
+
+            return functools.partial(_deserialize_model, annotation)  # pyright: ignore
+    except Exception:
+        pass
+
+    # is it a literal?
+    try:
+        if annotation.__origin__ is typing.Literal:  # pyright: ignore
+            return None
+    except AttributeError:
+        pass
+
+    # is it optional?
+    try:
+        if any(a for a in annotation.__args__ if a == type(None)):  # pyright: ignore
+            if len(annotation.__args__) <= 2:  # pyright: ignore
+                if_obj_deserializer = _get_deserialize_callable_from_annotation(
+                    next(a for a in annotation.__args__ if a != type(None)), module, rf  # pyright: ignore
+                )
+
+                return functools.partial(_deserialize_with_optional, if_obj_deserializer)
+            # the type is Optional[Union[...]], we need to remove the None type from the Union
+            annotation_copy = copy.copy(annotation)
+            annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)]  # pyright: ignore
+            return _get_deserialize_callable_from_annotation(annotation_copy, module, rf)
+    except AttributeError:
+        pass
+
+    # is it union?
+    if getattr(annotation, "__origin__", None) is typing.Union:
+        # initial ordering is we make `string` the last deserialization option, because it is often the most generic
+        deserializers = [
+            _get_deserialize_callable_from_annotation(arg, module, rf)
+            for arg in _sorted_annotations(annotation.__args__)  # pyright: ignore
+        ]
+
+        return functools.partial(_deserialize_with_union, deserializers)
+
+    try:
+        if annotation._name == "Dict":  # pyright: ignore
+            value_deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[1], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(
+                _deserialize_dict,
+                value_deserializer,
+                module,
+            )
+    except (AttributeError, IndexError):
+        pass
+    try:
+        if annotation._name in ["List", "Set", "Tuple", "Sequence"]:  # pyright: ignore
+            if len(annotation.__args__) > 1:  # pyright: ignore
+                entry_deserializers = [
+                    _get_deserialize_callable_from_annotation(dt, module, rf)
+                    for dt in annotation.__args__  # pyright: ignore
+                ]
+                return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
+            deserializer = _get_deserialize_callable_from_annotation(
+                annotation.__args__[0], module, rf  # pyright: ignore
+            )
+
+            return functools.partial(_deserialize_sequence, deserializer, module)
+    except (TypeError, IndexError, AttributeError, SyntaxError):
+        pass
+
+    def _deserialize_default(
+        deserializer,
+        obj,
+    ):
+        if obj is None:
+            return obj
+        try:
+            return _deserialize_with_callable(deserializer, obj)
+        except Exception:
+            pass
+        return obj
+
+    if get_deserializer(annotation, rf):
+        return functools.partial(_deserialize_default, get_deserializer(annotation, rf))
+
+    return functools.partial(_deserialize_default, annotation)
+
+
+def _deserialize_with_callable(
+    deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
+    value: typing.Any,
+):  # pylint: disable=too-many-return-statements
+    try:
+        if value is None or isinstance(value, _Null):
+            return None
+        if isinstance(value, ET.Element):
+            if deserializer is str:
+                return value.text or ""
+            if deserializer is int:
+                return int(value.text) if value.text else None
+            if deserializer is float:
+                return float(value.text) if value.text else None
+            if
deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + 
"prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py new file mode 100644 index 000000000000..53c3c5b6697b --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -0,0 +1,246 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import uuid
+from os import PathLike
+from pathlib import Path
+from typing import List, Any, Union, Dict
+from typing_extensions import Self
+from azure.core.credentials import TokenCredential
+from azure.core import PipelineClient
+from azure.core.pipeline import policies
+from ._configuration import AIProjectClientConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
+from ._client import AIProjectClient as ClientGenerated
+from .operations._patch import InferenceOperations
+
+
+class AIProjectClient(ClientGenerated):
+
+    def __init__(
+        self,
+        endpoint: str,
+        subscription_id: str,
+        resource_group_name: str,
+        project_name: str,
+        credential: "TokenCredential",
+        **kwargs: Any,
+    ) -> None:
+        # TODO: Validate input formats with regex match (e.g. subscription ID)
+        if not endpoint:
+            raise ValueError("endpoint is required")
+        if not subscription_id:
+            raise ValueError("subscription_id is required")
+        if not resource_group_name:
+            raise ValueError("resource_group_name is required")
+        if not project_name:
+            raise ValueError("project_name is required")
+        if not credential:
+            raise ValueError("credential is required")
+        if "api_version" in kwargs:
+            raise ValueError("No support for overriding the API version")
+        if "credential_scopes" in kwargs:
+            raise ValueError("No support for overriding the credential scopes")
+
+        kwargs1 = kwargs.copy()
+        kwargs2 = kwargs.copy()
+        kwargs3 = kwargs.copy()
+
+        # For Endpoints operations (enumerating connections, getting SAS tokens)
+        _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
+        self._config1 = AIProjectClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            project_name=project_name,
+            credential=credential,
+            api_version="2024-07-01-preview",
+            credential_scopes=["https://management.azure.com"],
+            **kwargs1,
+        )
+        _policies1 = kwargs1.pop("policies", None)
+        if _policies1 is None:
+            _policies1 = [
+                policies.RequestIdPolicy(**kwargs1),
+                self._config1.headers_policy,
+                self._config1.user_agent_policy,
+                self._config1.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs1),
+                self._config1.redirect_policy,
+                self._config1.retry_policy,
+                self._config1.authentication_policy,
+                self._config1.custom_hook_policy,
+                self._config1.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs1),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None,
+                self._config1.http_logging_policy,
+            ]
+        self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1)
+
+        # For Agents operations
+        _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
+        self._config2 = AIProjectClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            project_name=project_name,
+            credential=credential,
+            api_version="2024-07-01-preview",  # TODO: Update me
+            credential_scopes=["https://ml.azure.com"],
+            **kwargs2,
+        )
+        _policies2 = kwargs2.pop("policies", None)
+        if _policies2 is
None: + _policies2 = [ + policies.RequestIdPolicy(**kwargs2), + self._config2.headers_policy, + self._config2.user_agent_policy, + self._config2.proxy_policy, + policies.ContentDecodePolicy(**kwargs2), + self._config2.redirect_policy, + self._config2.retry_policy, + self._config2.authentication_policy, + self._config2.custom_hook_policy, + self._config2.logging_policy, + policies.DistributedTracingPolicy(**kwargs2), + policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, + self._config2.http_logging_policy, + ] + self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + + # For Cloud Evaluations operations + _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + self._config3 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2024-07-01-preview", # TODO: Update me + credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready + **kwargs3, + ) + _policies3 = kwargs3.pop("policies", None) + if _policies3 is None: + _policies3 = [ + policies.RequestIdPolicy(**kwargs3), + self._config3.headers_policy, + self._config3.user_agent_policy, + self._config3.proxy_policy, + policies.ContentDecodePolicy(**kwargs3), + self._config3.redirect_policy, + self._config3.retry_policy, + self._config3.authentication_policy, + self._config3.custom_hook_policy, + self._config3.logging_policy, + policies.DistributedTracingPolicy(**kwargs3), + policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, + self._config3.http_logging_policy, + ] + self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + + self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) + self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) + self.inference = InferenceOperations(self) + + def close(self) -> None: + self._client1.close() + self._client2.close() + self._client3.close() + + def __enter__(self) -> Self: + self._client1.__enter__() + self._client2.__enter__() + self._client3.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client1.__exit__(*exc_details) + self._client2.__exit__(*exc_details) + self._client3.__exit__(*exc_details) + + @classmethod + def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AIProjectClient": + """ + Create an AIProjectClient from a connection string. + + :param conn_str: The connection string, copied from your AI Studio project. 
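+        :type conn_str: str
+        :param credential: Credential used to authenticate requests to the project.
+        :type credential: ~azure.core.credentials.TokenCredential
+        :return: An AIProjectClient instance.
+        :rtype: AIProjectClient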
+ """ + if not conn_str: + raise ValueError("Connection string is required") + parts = conn_str.split(";") + if len(parts) != 4: + raise ValueError("Invalid connection string format") + endpoint = "https://" + parts[0] + subscription_id = parts[1] + resource_group_name = parts[2] + project_name = parts[3] + return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) + + def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: + """Upload a file to the Azure AI Studio project. + This method required *azure-ai-ml* to be installed. + + :param file_path: The path to the file to upload. + :type file_path: Union[str, Path, PathLike] + :return: The asset id of uploaded file. + :rtype: str + """ + try: + from azure.ai.ml import MLClient + from azure.ai.ml.entities import Data + from azure.ai.ml.constants import AssetTypes + except ImportError: + raise ImportError( + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") + + data = Data( + path=file_path, + type=AssetTypes.URI_FILE, + name=str(uuid.uuid4()), # generating random name + is_anonymous=True, + version="1", + ) + + ml_client = MLClient( + self._config3.credential, + self._config3.subscription_id, + self._config3.resource_group_name, + self._config3.project_name, + ) + + data_asset = ml_client.data.create_or_update(data) + + return data_asset.id + + @property + def scope(self) -> Dict[str, str]: + return { + "subscription_id": self._config3.subscription_id, + "resource_group_name": self._config3.resource_group_name, + "project_name": self._config3.project_name, + } + +__all__: List[str] = [ + "AIProjectClient", +] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py new file mode 100644 index 000000000000..ce17d1798ce7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_serialization.py @@ -0,0 +1,2114 @@ +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# --------------------------------------------------------------------------
+
+# pyright: reportUnnecessaryTypeIgnoreComment=false
+
+from base64 import b64decode, b64encode
+import calendar
+import datetime
+import decimal
+import email
+from enum import Enum
+import json
+import logging
+import re
+import sys
+import codecs
+from typing import (
+    Dict,
+    Any,
+    cast,
+    Optional,
+    Union,
+    AnyStr,
+    IO,
+    Mapping,
+    Callable,
+    TypeVar,
+    MutableMapping,
+    Type,
+    List,
+)
+
+try:
+    from urllib import quote  # type: ignore
+except ImportError:
+    from urllib.parse import quote
+import xml.etree.ElementTree as ET
+
+import isodate  # type: ignore
+
+from azure.core.exceptions import DeserializationError, SerializationError
+from azure.core.serialization import NULL as CoreNull
+
+_BOM = codecs.BOM_UTF8.decode(encoding="utf-8")
+
+ModelType = TypeVar("ModelType", bound="Model")
+JSON = MutableMapping[str, Any]
+
+
+class RawDeserializer:
+
+    # Accept "text" because we're open minded people...
+    JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$")
+
+    # Name used in context
+    CONTEXT_NAME = "deserialized_data"
+
+    @classmethod
+    def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any:
+        """Decode data according to content-type.
+
+        Accept a stream of data as well, but it will be loaded at once into memory for now.
+
+        If no content-type, will return the string version (not bytes, not stream)
+
+        :param data: Input, could be bytes or stream (will be decoded with UTF8) or text
+        :type data: str or bytes or IO
+        :param str content_type: The content type.
+        :return: The deserialized data.
+        :rtype: object
+        """
+        if hasattr(data, "read"):
+            # Assume a stream
+            data = cast(IO, data).read()
+
+        if isinstance(data, bytes):
+            data_as_str = data.decode(encoding="utf-8-sig")
+        else:
+            # Explain to mypy the correct type.
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err) from err
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError as err:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If i'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+ _LOGGER.critical("Wasn't XML not JSON, failing") + raise DeserializationError("XML is invalid") from err + elif content_type.startswith("text/"): + return data_as_str + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will tested for "content-type" + + :param bytes body_bytes: The body of the response. + :param dict headers: The headers of the response. + :returns: The deserialized data. + :rtype: object + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTF offset for UTC is 0. + + :param datetime.datetime dt: The datetime + :returns: The offset + :rtype: datetime.timedelta + """ + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation. + + :param datetime.datetime dt: The datetime + :returns: The timestamp representation + :rtype: str + """ + return "Z" + + def dst(self, dt): + """No daylight saving for UTC. + + :param datetime.datetime dt: The datetime + :returns: The daylight saving time + :rtype: datetime.timedelta + """ + return datetime.timedelta(hours=1) + + +try: + from datetime import timezone as _FixedOffset # type: ignore +except ImportError: # Python 2.7 + + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ + + def __init__(self, offset) -> None: + self.__offset = offset + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return str(self.__offset.total_seconds() / 3600) + + def __repr__(self): + return "".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) + + +try: + from datetime import timezone + + TZ_UTC = timezone.utc +except ImportError: + TZ_UTC = UTC() # type: ignore + +_FLATTEN = re.compile(r"(? None: + self.additional_properties: Optional[Dict[str, Any]] = {} + for k in kwargs: # pylint: disable=consider-using-dict-items + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. 
+ + :param object other: The object to compare + :returns: True if objects are equal + :rtype: bool + """ + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes. + + :param object other: The object to compare + :returns: True if objects are not equal + :rtype: bool + """ + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node. + + :returns: The XML node + :rtype: xml.etree.ElementTree.Element + """ + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to server from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, keep_readonly=keep_readonly, **kwargs + ) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize( # type: ignore # pylint: disable=protected-access + self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs + ) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: # pylint: disable=broad-exception-caught + # Assume it's not Autorest generated (tests?). 
Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + :rtype: ModelType + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param function key_extractors: A key extractor function. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + :rtype: ModelType + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) # type: ignore + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + + :param dict response: The initial data + :param dict objects: The class objects + :returns: The class to be used + :rtype: class + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. 
Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. + :returns: A list of RestAPI part + :rtype: list + """ + rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) + return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] + + +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. + + :param str key: A key string from the generated code + :returns: The decoded key + :rtype: str + """ + return key.replace("\\.", ".") + + +class Serializer(object): # pylint: disable=too-many-public-methods + """Request object model serializer.""" + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} + days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} + months = { + 1: "Jan", + 2: "Feb", + 3: "Mar", + 4: "Apr", + 5: "May", + 6: "Jun", + 7: "Jul", + 8: "Aug", + 9: "Sep", + 10: "Oct", + 11: "Nov", + 12: "Dec", + } + validation = { + "min_length": lambda x, y: len(x) < y, + "max_length": lambda x, y: len(x) > y, + "minimum": lambda x, y: x < y, + "maximum": lambda x, y: x > y, + "minimum_ex": lambda x, y: x <= y, + "maximum_ex": lambda x, y: x >= y, + "min_items": lambda x, y: len(x) < y, + "max_items": lambda x, y: len(x) > y, + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), + "unique": lambda x, y: len(x) != len(set(x)), + "multiple": lambda x, y: x % y != 0, + } + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.serialize_type = { + "iso-8601": Serializer.serialize_iso, + "rfc-1123": Serializer.serialize_rfc, + "unix-time": Serializer.serialize_unix, + "duration": Serializer.serialize_duration, + "date": Serializer.serialize_date, + "time": Serializer.serialize_time, + "decimal": Serializer.serialize_decimal, + "long": Serializer.serialize_long, + "bytearray": Serializer.serialize_bytearray, + "base64": Serializer.serialize_base64, + "object": self.serialize_object, + "[]": self.serialize_iter, + "{}": self.serialize_dict, + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True + + def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals + self, target_obj, data_type=None, **kwargs + ): + """Serialize data into a string according to type. + + :param object target_obj: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, dict + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() # pylint: disable=protected-access + try: + attributes = target_obj._attribute_map # pylint: disable=protected-access + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access + attr_name, {} + ).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{{{}}}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, + # we MUST replace the tag with the local tag. But keeping the namespaces. 
+ if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError as err: + if isinstance(err, SerializationError): + raise + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise SerializationError(msg) from err + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized request body + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access + except DeserializationError as err: + raise SerializationError("Unable to build a model: " + str(err)) from err + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param str name: The name of the URL path parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :returns: The serialized URL path + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + output = output.replace("{", quote("{")).replace("}", quote("}")) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param str name: The name of the query parameter. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str, list + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized query parameter + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + do_quote = not kwargs.get("skip_quote", False) + return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param str name: The name of the header. + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + :returns: The serialized header + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError as exc: + raise TypeError("{} must be type {}.".format(name, data_type)) from exc + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param object data: The data to be serialized. + :param str data_type: The type to be serialized from. + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + :returns: The serialized data. 
+        :rtype: str, int, float, bool, dict, list
+        """
+        if data is None:
+            raise ValueError("No value for given attribute")
+
+        try:
+            if data is CoreNull:
+                return None
+            if data_type in self.basic_types.values():
+                return self.serialize_basic(data, data_type, **kwargs)
+
+            if data_type in self.serialize_type:
+                return self.serialize_type[data_type](data, **kwargs)
+
+            # If dependencies is empty, try with current data class
+            # It has to be a subclass of Enum anyway
+            enum_type = self.dependencies.get(data_type, data.__class__)
+            if issubclass(enum_type, Enum):
+                return Serializer.serialize_enum(data, enum_obj=enum_type)
+
+            iter_type = data_type[0] + data_type[-1]
+            if iter_type in self.serialize_type:
+                return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
+
+        except (ValueError, TypeError) as err:
+            msg = "Unable to serialize value: {!r} as type: {!r}."
+            raise SerializationError(msg.format(data, data_type)) from err
+        return self._serialize(data, **kwargs)
+
+    @classmethod
+    def _get_custom_serializers(cls, data_type, **kwargs):  # pylint: disable=inconsistent-return-statements
+        custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
+        if custom_serializer:
+            return custom_serializer
+        if kwargs.get("is_xml", False):
+            return cls._xml_basic_types_serializers.get(data_type)
+
+    @classmethod
+    def serialize_basic(cls, data, data_type, **kwargs):
+        """Serialize basic builtin data type.
+        Serializes objects to str, int, float or bool.
+
+        Possible kwargs:
+        - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
+        - is_xml bool : If set, use xml_basic_types_serializers
+
+        :param obj data: Object to be serialized.
+        :param str data_type: Type of object in the iterable.
+        :rtype: str, int, float, bool
+        :return: serialized object
+        """
+        custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
+        if custom_serializer:
+            return custom_serializer(data)
+        if data_type == "str":
+            return cls.serialize_unicode(data)
+        return eval(data_type)(data)  # nosec # pylint: disable=eval-used
+
+    @classmethod
+    def serialize_unicode(cls, data):
+        """Special handling for serializing unicode strings in Py2.
+        Encode to UTF-8 if unicode, otherwise handle as a str.
+
+        :param str data: Object to be serialized.
+        :rtype: str
+        :return: serialized object
+        """
+        try:  # If I received an enum, return its value
+            return data.value
+        except AttributeError:
+            pass
+
+        try:
+            if isinstance(data, unicode):  # type: ignore
+                # Don't change it, JSON and XML ElementTree are totally able
+                # to serialize correctly u'' strings
+                return data
+        except NameError:
+            return str(data)
+        return str(data)
+
+    def serialize_iter(self, data, iter_type, div=None, **kwargs):
+        """Serialize iterable.
+
+        Supported kwargs:
+        - serialization_ctxt dict : The current entry of _attribute_map, or same format.
+          serialization_ctxt['type'] should be same as data_type.
+        - is_xml bool : If set, serialize as XML
+
+        :param list data: Object to be serialized.
+        :param str iter_type: Type of object in the iterable.
+        :param str div: If set, this str will be used to combine the elements
+         in the iterable into a combined string. Default is 'None'.
+ :rtype: list, str
+ :return: serialized iterable
+ """
+ if isinstance(data, str):
+ raise SerializationError("Refuse str type as a valid iter type.")
+
+ serialization_ctxt = kwargs.get("serialization_ctxt", {})
+ is_xml = kwargs.get("is_xml", False)
+
+ serialized = []
+ for d in data:
+ try:
+ serialized.append(self.serialize_data(d, iter_type, **kwargs))
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
+ serialized.append(None)
+
+ if kwargs.get("do_quote", False):
+ serialized = ["" if s is None else quote(str(s), safe="") for s in serialized]
+
+ if div:
+ serialized = ["" if s is None else str(s) for s in serialized]
+ serialized = div.join(serialized)
+
+ if "xml" in serialization_ctxt or is_xml:
+ # XML serialization is more complicated
+ xml_desc = serialization_ctxt.get("xml", {})
+ xml_name = xml_desc.get("name")
+ if not xml_name:
+ xml_name = serialization_ctxt["key"]
+
+ # Create a wrap node if necessary (use the fact that Element and list have "append")
+ is_wrapped = xml_desc.get("wrapped", False)
+ node_name = xml_desc.get("itemsName", xml_name)
+ if is_wrapped:
+ final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ else:
+ final_result = []
+ # Append all list elements to the final result
+ for el in serialized:
+ if isinstance(el, ET.Element):
+ el_node = el
+ else:
+ el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ if el is not None: # Otherwise it writes "None" :-p
+ el_node.text = str(el)
+ final_result.append(el_node)
+ return final_result
+ return serialized
+
+ def serialize_dict(self, attr, dict_type, **kwargs):
+ """Serialize a dictionary of objects.
+
+ :param dict attr: Object to be serialized.
+ :param str dict_type: Type of object in the dictionary.
+ :rtype: dict
+ :return: serialized dictionary
+ """
+ serialization_ctxt = kwargs.get("serialization_ctxt", {})
+ serialized = {}
+ for key, value in attr.items():
+ try:
+ serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs)
+ except ValueError as err:
+ if isinstance(err, SerializationError):
+ raise
+ serialized[self.serialize_unicode(key)] = None
+
+ if "xml" in serialization_ctxt:
+ # XML serialization is more complicated
+ xml_desc = serialization_ctxt["xml"]
+ xml_name = xml_desc["name"]
+
+ final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None))
+ for key, value in serialized.items():
+ ET.SubElement(final_result, key).text = value
+ return final_result
+
+ return serialized
+
+ def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
+ """Serialize a generic object.
+ This will be handled as a dictionary. If object passed in is not
+ a basic type (str, int, float, dict, list) it will simply be
+ cast to str.
+
+ :param dict attr: Object to be serialized.
+ :rtype: dict or str
+ :return: serialized object
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element):
+ return attr
+ obj_type = type(attr)
+ if obj_type in self.basic_types:
+ return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs)
+ if obj_type is _long_type:
+ return self.serialize_long(attr)
+ if obj_type is str:
+ return self.serialize_unicode(attr)
+ if obj_type is datetime.datetime:
+ return self.serialize_iso(attr)
+ if obj_type is datetime.date:
+ return self.serialize_date(attr)
+ if obj_type is datetime.time:
+ return self.serialize_time(attr)
+ if obj_type is datetime.timedelta:
+ return self.serialize_duration(attr)
+ if obj_type is decimal.Decimal:
+ return self.serialize_decimal(attr)
+
+ # If it's a model or I know this dependency, serialize as a Model
+ if obj_type in self.dependencies.values() or isinstance(attr, Model):
+ return self._serialize(attr)
+
+ if obj_type == dict:
+ serialized = {}
+ for key, value in attr.items():
+ try:
+ serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs)
+ except ValueError:
+ serialized[self.serialize_unicode(key)] = None
+ return serialized
+
+ if obj_type == list:
+ serialized = []
+ for obj in attr:
+ try:
+ serialized.append(self.serialize_object(obj, **kwargs))
+ except ValueError:
+ pass
+ return serialized
+ return str(attr)
+
+ @staticmethod
+ def serialize_enum(attr, enum_obj=None):
+ """Serialize an enum member (or raw value) to its underlying value.
+
+ :param attr: Object to be serialized. May be an Enum member or a plain value.
+ :param enum_obj: The Enum class to validate the value against.
+ :return: serialized enum value
+ :raises SerializationError: if the value is not valid for the enum.
+ """
+ try:
+ result = attr.value
+ except AttributeError:
+ result = attr
+ try:
+ enum_obj(result) # type: ignore
+ return result
+ except ValueError as exc:
+ for enum_value in enum_obj: # type: ignore
+ if enum_value.value.lower() == str(attr).lower():
+ return enum_value.value
+ error = "{!r} is not a valid value for enum {!r}"
+ raise SerializationError(error.format(attr, enum_obj)) from exc
+
+ @staticmethod
+ def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize bytearray into base-64 string.
+
+ :param bytearray attr: Object to be serialized.
+ :rtype: str
+ :return: serialized base64
+ """
+ return b64encode(attr).decode()
+
+ @staticmethod
+ def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize bytes into a URL-safe base-64 string (padding stripped).
+
+ :param bytes attr: Object to be serialized.
+ :rtype: str
+ :return: serialized base64
+ """
+ encoded = b64encode(attr).decode("ascii")
+ return encoded.strip("=").replace("+", "-").replace("/", "_")
+
+ @staticmethod
+ def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Decimal object to float.
+
+ :param decimal.Decimal attr: Object to be serialized.
+ :rtype: float
+ :return: serialized decimal
+ """
+ return float(attr)
+
+ @staticmethod
+ def serialize_long(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize long (Py2) or int (Py3).
+
+ :param int attr: Object to be serialized.
+ :rtype: int/long
+ :return: serialized long
+ """
+ return _long_type(attr)
+
+ @staticmethod
+ def serialize_date(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Date object into ISO-8601 formatted string.
+
+ :param Date attr: Object to be serialized.
+ :rtype: str
+ :return: serialized date
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_date(attr)
+ t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day)
+ return t
+
+ @staticmethod
+ def serialize_time(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Time object into ISO-8601 formatted string.
+
+ :param datetime.time attr: Object to be serialized.
+ :rtype: str
+ :return: serialized time
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_time(attr)
+ t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
+ if attr.microsecond:
+ t += ".{:06}".format(attr.microsecond) # pad microseconds to six digits for valid ISO-8601
+ return t
+
+ @staticmethod
+ def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize TimeDelta object into ISO-8601 formatted string.
+
+ :param TimeDelta attr: Object to be serialized.
+ :rtype: str
+ :return: serialized duration
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_duration(attr)
+ return isodate.duration_isoformat(attr)
+
+ @staticmethod
+ def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into RFC-1123 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises: TypeError if format invalid.
+ :return: serialized rfc
+ """
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ utc = attr.utctimetuple()
+ except AttributeError as exc:
+ raise TypeError("RFC1123 object must be valid Datetime object.") from exc
+
+ return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
+ Serializer.days[utc.tm_wday],
+ utc.tm_mday,
+ Serializer.months[utc.tm_mon],
+ utc.tm_year,
+ utc.tm_hour,
+ utc.tm_min,
+ utc.tm_sec,
+ )
+
+ @staticmethod
+ def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into ISO-8601 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises: SerializationError if format invalid.
+ :return: serialized iso
+ """
+ if isinstance(attr, str):
+ attr = isodate.parse_datetime(attr)
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ utc = attr.utctimetuple()
+ if utc.tm_year > 9999 or utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+
+ microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
+ if microseconds:
+ microseconds = "." + microseconds
+ date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
+ utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
+ )
+ return date + microseconds + "Z"
+ except (ValueError, OverflowError) as err:
+ msg = "Unable to serialize datetime object."
+ raise SerializationError(msg) from err
+ except AttributeError as err:
+ msg = "ISO-8601 object must be valid Datetime object."
+ raise TypeError(msg) from err
+
+ @staticmethod
+ def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
+ """Serialize Datetime object into IntTime format.
+ This is represented as seconds.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: int
+ :raises: SerializationError if format invalid
+ :return: serialized unix
+ """
+ if isinstance(attr, int):
+ return attr
+ try:
+ if not attr.tzinfo:
+ _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
+ return int(calendar.timegm(attr.utctimetuple()))
+ except AttributeError as exc:
+ raise TypeError("Unix time object must be valid Datetime object.") from exc
+
+
+def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" by following the full (possibly flattened) JSON path key.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ working_data = data
+
+ while "."
in key:
+ # Need the cast, as for some reason "split" is typed as list[str | Any]
+ dict_keys = cast(List[str], _FLATTEN.split(key))
+ if len(dict_keys) == 1:
+ key = _decode_attribute_map_key(dict_keys[0])
+ break
+ working_key = _decode_attribute_map_key(dict_keys[0])
+ working_data = working_data.get(working_key, data)
+ if working_data is None:
+ # If at any point while following the flattened JSON path we see None, it means
+ # that all properties underneath are None as well
+ return None
+ key = ".".join(dict_keys[1:])
+
+ return working_data.get(key)
+
+
+def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements
+ attr, attr_desc, data
+):
+ """Extract the attribute in "data" by following the full JSON path key, case-insensitively.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ working_data = data
+
+ while "." in key:
+ dict_keys = _FLATTEN.split(key)
+ if len(dict_keys) == 1:
+ key = _decode_attribute_map_key(dict_keys[0])
+ break
+ working_key = _decode_attribute_map_key(dict_keys[0])
+ working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data)
+ if working_data is None:
+ # If at any point while following the flattened JSON path we see None, it means
+ # that all properties underneath are None as well
+ return None
+ key = ".".join(dict_keys[1:])
+
+ if working_data:
+ return attribute_key_case_insensitive_extractor(key, None, working_data)
+
+
+def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ dict_keys = _FLATTEN.split(key)
+ return attribute_key_extractor(dict_keys[-1], None, data)
+
+
+def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
+ """Extract the attribute in "data" based on the last part of the JSON path key.
+
+ This is the case insensitive version of "last_rest_key_extractor".
+
+ :param str attr: The attribute to extract
+ :param dict attr_desc: The attribute description
+ :param dict data: The data to extract from
+ :rtype: object
+ :returns: The extracted attribute
+ """
+ key = attr_desc["key"]
+ dict_keys = _FLATTEN.split(key)
+ return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data)
+
+
+def attribute_key_extractor(attr, _, data):
+ return data.get(attr)
+
+
+def attribute_key_case_insensitive_extractor(attr, _, data):
+ found_key = None
+ lower_attr = attr.lower()
+ for key in data:
+ if lower_attr == key.lower():
+ found_key = key
+ break
+
+ return data.get(found_key)
+
+
+def _extract_name_from_internal_type(internal_type):
+ """Given an internal type XML description, extract the correct XML name with namespace.
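+
+ For example, an internal type whose ``_xml_map`` is ``{"name": "Pet", "ns": "http://example.org"}``
+ resolves to the XML name ``{http://example.org}Pet`` (the namespace example is illustrative).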
+
+ :param dict internal_type: A model type
+ :rtype: tuple
+ :returns: A tuple XML name + namespace dict
+ """
+ internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+ xml_name = internal_type_xml_map.get("name", internal_type.__name__)
+ xml_ns = internal_type_xml_map.get("ns", None)
+ if xml_ns:
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+ return xml_name
+
+
+def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements
+ if isinstance(data, dict):
+ return None
+
+ # Test if this model is XML ready first
+ if not isinstance(data, ET.Element):
+ return None
+
+ xml_desc = attr_desc.get("xml", {})
+ xml_name = xml_desc.get("name", attr_desc["key"])
+
+ # Look for children
+ is_iter_type = attr_desc["type"].startswith("[")
+ is_wrapped = xml_desc.get("wrapped", False)
+ internal_type = attr_desc.get("internalType", None)
+ internal_type_xml_map = getattr(internal_type, "_xml_map", {})
+
+ # Integrate namespace if necessary
+ xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
+ if xml_ns:
+ xml_name = "{{{}}}{}".format(xml_ns, xml_name)
+
+ # If it's an attribute, that's simple
+ if xml_desc.get("attr", False):
+ return data.get(xml_name)
+
+ # If it's x-ms-text, that's simple too
+ if xml_desc.get("text", False):
+ return data.text
+
+ # Scenario where I take the local name:
+ # - Wrapped node
+ # - Internal type is an enum (considered basic types)
+ # - Internal type has no XML/Name node
+ if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
+ children = data.findall(xml_name)
+ # If internal type has a local name and it's not a list, I use that name
+ elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
+ xml_name = _extract_name_from_internal_type(internal_type)
+ children = data.findall(xml_name)
+ # That's an array
+ else:
+ if internal_type: # Complex type, ignore itemsName and use the complex type name
+ items_name = _extract_name_from_internal_type(internal_type)
+ else:
+ items_name = xml_desc.get("itemsName", xml_name)
+ children = data.findall(items_name)
+
+ if len(children) == 0:
+ if is_iter_type:
+ if is_wrapped:
+ return None # is_wrapped no node, we want None
+ return [] # not wrapped, assume empty list
+ return None # Assume it's not there, maybe an optional node.
+
+ # If is_iter_type and not wrapped, return all found children
+ if is_iter_type:
+ if not is_wrapped:
+ return children
+ # Iter and wrapped, should have found one node only (the wrap one)
+ if len(children) != 1:
+ raise DeserializationError(
+ "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long
+ xml_name
+ )
+ )
+ return list(children[0]) # Might be empty list and that's ok.
+
+ # Here it's not an iter type, we should have found one element only or empty
+ if len(children) > 1:
+ raise DeserializationError("Found several XML '{}' where it was not expected".format(xml_name))
+ return children[0]
+
+
+class Deserializer(object):
+ """Response object model deserializer.
+
+ :param dict classes: Class type dictionary for deserializing complex types.
+ :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
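+
+ Example (a minimal sketch; ``MyModel`` is a hypothetical msrest-style model class
+ registered under the ``classes`` mapping):
+
+ .. code-block:: python
+
+ deserializer = Deserializer(classes={"MyModel": MyModel})
+ model = deserializer("MyModel", http_response, content_type="application/json")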
+ """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies: Dict[str, type] = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties only works if the "rest_key_extractor" is used to + # extract the keys. Making it to work whatever the key extractor is too much + # complicated, with no real scenario for now. + # So adding a flag to disable additional properties detection. This flag should be + # used if your expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, result are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. 
+ :rtype: object + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, str): + return self.deserialize_data(data, response) + if isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None or data is CoreNull: + return data + try: + attributes = response._attribute_map # type: ignore # pylint: disable=protected-access + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise DeserializationError(msg) from err + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :return: The classified target object and its class name. 
+ :rtype: tuple
+ """
+ if target is None:
+ return None, None
+
+ if isinstance(target, str):
+ try:
+ target = self.dependencies[target]
+ except KeyError:
+ return target, target
+
+ try:
+ target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access
+ except AttributeError:
+ pass # Target is not a Model, no classify
+ return target, target.__class__.__name__ # type: ignore
+
+ def failsafe_deserialize(self, target_obj, data, content_type=None):
+ """Ignores any errors encountered in deserialization,
+ and falls back to not deserializing the object. Recommended
+ for use in error deserialization, as we want to return the
+ HttpResponseError to users, and not have them deal with
+ a deserialization error.
+
+ :param str target_obj: The target object type to deserialize to.
+ :param str/dict data: The response data to deserialize.
+ :param str content_type: Swagger "produces" if available.
+ :return: Deserialized object.
+ :rtype: object
+ """
+ try:
+ return self(target_obj, data, content_type=content_type)
+ except: # pylint: disable=bare-except
+ _LOGGER.debug(
+ "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
+ )
+ return None
+
+ @staticmethod
+ def _unpack_content(raw_data, content_type=None):
+ """Extract the correct structure for deserialization.
+
+ If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
+ If we can't, raise. Your Pipeline should have a RawDeserializer.
+
+ If not a pipeline response and raw_data is bytes or string, use content-type
+ to decode it. If no content-type, try JSON.
+
+ If raw_data is something else, bypass all logic and return it directly.
+
+ :param obj raw_data: Data to be processed.
+ :param str content_type: How to parse if raw_data is a string/bytes.
+ :raises JSONDecodeError: If JSON is requested and parsing is impossible.
+ :raises UnicodeDecodeError: If bytes is not UTF8
+ :rtype: object
+ :return: Unpacked content.
+ """
+ # Assume this is enough to detect a Pipeline Response without importing it
+ context = getattr(raw_data, "context", {})
+ if context:
+ if RawDeserializer.CONTEXT_NAME in context:
+ return context[RawDeserializer.CONTEXT_NAME]
+ raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
+
+ # Assume this is enough to recognize universal_http.ClientResponse without importing it
+ if hasattr(raw_data, "body"):
+ return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
+
+ # Assume this is enough to recognize requests.Response without importing it.
+ if hasattr(raw_data, "_content_consumed"):
+ return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
+
+ if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
+ return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore
+ return raw_data
+
+ def _instantiate_model(self, response, attrs, additional_properties=None):
+ """Instantiate a response model passing in deserialized args.
+
+ :param Response response: The response model class.
+ :param dict attrs: The deserialized response attributes.
+ :param dict additional_properties: Additional properties to be set.
+ :rtype: Response
+ :return: The instantiated response model.
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [ + k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access + ] + const = [ + k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access + ] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) from err + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) from exp + + def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + :rtype: object + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment + "object", + "[]", + r"{}", + ] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise DeserializationError(msg) from err + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :return: Deserialized iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :return: Deserialized dictionary. 
+ :rtype: dict
+ """
+ if isinstance(attr, list):
+ return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
+
+ if isinstance(attr, ET.Element):
+ # Transform value into {"Key": "value"}
+ attr = {el.tag: el.text for el in attr}
+ return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
+
+ def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements
+ """Deserialize a generic object.
+ This will be handled as a dictionary.
+
+ :param dict attr: Dictionary to be deserialized.
+ :return: Deserialized object.
+ :rtype: dict
+ :raises: TypeError if non-builtin datatype encountered.
+ """
+ if attr is None:
+ return None
+ if isinstance(attr, ET.Element):
+ # Do not recurse on XML, just return the tree as-is
+ return attr
+ if isinstance(attr, str):
+ return self.deserialize_basic(attr, "str")
+ obj_type = type(attr)
+ if obj_type in self.basic_types:
+ return self.deserialize_basic(attr, self.basic_types[obj_type])
+ if obj_type is _long_type:
+ return self.deserialize_long(attr)
+
+ if obj_type == dict:
+ deserialized = {}
+ for key, value in attr.items():
+ try:
+ deserialized[key] = self.deserialize_object(value, **kwargs)
+ except ValueError:
+ deserialized[key] = None
+ return deserialized
+
+ if obj_type == list:
+ deserialized = []
+ for obj in attr:
+ try:
+ deserialized.append(self.deserialize_object(obj, **kwargs))
+ except ValueError:
+ pass
+ return deserialized
+
+ error = "Cannot deserialize generic object with type: "
+ raise TypeError(error + str(obj_type))
+
+ def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements
+ """Deserialize basic builtin data type from string.
+ Will attempt to convert to str, int, float and bool.
+ This function will also accept '1', '0', 'true' and 'false' as
+ valid bool values.
+
+ :param str attr: response string to be deserialized.
+ :param str data_type: deserialization data type.
+ :return: Deserialized basic type.
+ :rtype: str, int, float or bool
+ :raises: TypeError if string format is not valid.
+ """
+ # If we're here, data is supposed to be a basic type.
+ # If it's still an XML node, take the text
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if not attr:
+ if data_type == "str":
+ # None or '', node is empty string.
+ return ""
+ # None or '', node with a strong type is None.
+ # Don't try to model "empty bool" or "empty int"
+ return None
+
+ if data_type == "bool":
+ if attr in [True, False, 1, 0]:
+ return bool(attr)
+ if isinstance(attr, str):
+ if attr.lower() in ["true", "1"]:
+ return True
+ if attr.lower() in ["false", "0"]:
+ return False
+ raise TypeError("Invalid boolean value: {}".format(attr))
+
+ if data_type == "str":
+ return self.deserialize_unicode(attr)
+ return eval(data_type)(attr) # nosec # pylint: disable=eval-used
+
+ @staticmethod
+ def deserialize_unicode(data):
+ """Preserve unicode objects in Python 2, otherwise return data
+ as a string.
+
+ :param str data: response string to be deserialized.
+ :return: Deserialized string.
+ :rtype: str or unicode
+ """
+ # We might be here because we have an enum modeled as string,
+ # and we try to deserialize a partial dict with enum inside
+ if isinstance(data, Enum):
+ return data
+
+ # Consider this a real string
+ try:
+ if isinstance(data, unicode): # type: ignore
+ return data
+ except NameError:
+ return str(data)
+ return str(data)
+
+ @staticmethod
+ def deserialize_enum(data, enum_obj):
+ """Deserialize string into enum object.
+
+ If the string is not a valid enum value it will be returned as-is
+ and a warning will be logged.
+
+ :param str data: Response string to be deserialized. If this value is
+ None or invalid it will be returned as-is.
+ :param Enum enum_obj: Enum object to deserialize to.
+ :return: Deserialized enum object.
+ :rtype: Enum
+ """
+ if isinstance(data, enum_obj) or data is None:
+ return data
+ if isinstance(data, Enum):
+ data = data.value
+ if isinstance(data, int):
+ # Workaround. We might consider removing it in the future.
+ try:
+ return list(enum_obj.__members__.values())[data]
+ except IndexError as exc:
+ error = "{!r} is not a valid index for enum {!r}"
+ raise DeserializationError(error.format(data, enum_obj)) from exc
+ try:
+ return enum_obj(str(data))
+ except ValueError:
+ for enum_value in enum_obj:
+ if enum_value.value.lower() == str(data).lower():
+ return enum_value
+ # We don't fail anymore for unknown value, we deserialize as a string
+ _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
+ return Deserializer.deserialize_unicode(data)
+
+ @staticmethod
+ def deserialize_bytearray(attr):
+ """Deserialize string into bytearray.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized bytearray
+ :rtype: bytearray
+ :raises: TypeError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ return bytearray(b64decode(attr)) # type: ignore
+
+ @staticmethod
+ def deserialize_base64(attr):
+ """Deserialize URL-safe base64 encoded string into bytes.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized bytes
+ :rtype: bytes
+ :raises: TypeError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore
+ attr = attr + padding # type: ignore
+ encoded = attr.replace("-", "+").replace("_", "/")
+ return b64decode(encoded)
+
+ @staticmethod
+ def deserialize_decimal(attr):
+ """Deserialize string into Decimal object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized decimal
+ :raises: DeserializationError if string format invalid.
+ :rtype: decimal.Decimal
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ return decimal.Decimal(str(attr)) # type: ignore
+ except decimal.DecimalException as err:
+ msg = "Invalid decimal {}".format(attr)
+ raise DeserializationError(msg) from err
+
+ @staticmethod
+ def deserialize_long(attr):
+ """Deserialize string into long (Py2) or int (Py3).
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized int
+ :rtype: long or int
+ :raises: ValueError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ return _long_type(attr) # type: ignore
+
+ @staticmethod
+ def deserialize_duration(attr):
+ """Deserialize ISO-8601 formatted string into TimeDelta object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized duration
+ :rtype: TimeDelta
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ duration = isodate.parse_duration(attr)
+ except (ValueError, OverflowError, AttributeError) as err:
+ msg = "Cannot deserialize duration object."
+ raise DeserializationError(msg) from err
+ return duration
+
+ @staticmethod
+ def deserialize_date(attr):
+ """Deserialize ISO-8601 formatted string into Date object.
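+
+ For example, ``"2024-09-25"`` deserializes to ``datetime.date(2024, 9, 25)``.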
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized date
+ :rtype: Date
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
+ # This must NOT use defaultmonth/defaultday. Using zero for both ensures an invalid date raises an exception.
+ return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
+
+ @staticmethod
+ def deserialize_time(attr):
+ """Deserialize ISO-8601 formatted string into time object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized time
+ :rtype: datetime.time
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
+ raise DeserializationError("Time must have only digits and separators. Received: %s" % attr)
+ return isodate.parse_time(attr)
+
+ @staticmethod
+ def deserialize_rfc(attr):
+ """Deserialize RFC-1123 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized RFC datetime
+ :rtype: Datetime
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ parsed_date = email.utils.parsedate_tz(attr) # type: ignore
+ date_obj = datetime.datetime(
+ *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
+ )
+ if not date_obj.tzinfo:
+ date_obj = date_obj.astimezone(tz=TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to rfc datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_iso(attr):
+ """Deserialize ISO-8601 formatted string into Datetime object.
+
+ :param str attr: response string to be deserialized.
+ :return: Deserialized ISO datetime
+ :rtype: Datetime
+ :raises: DeserializationError if string format invalid.
+ """
+ if isinstance(attr, ET.Element):
+ attr = attr.text
+ try:
+ attr = attr.upper() # type: ignore
+ match = Deserializer.valid_date.match(attr)
+ if not match:
+ raise ValueError("Invalid datetime string: " + attr)
+
+ check_decimal = attr.split(".")
+ if len(check_decimal) > 1:
+ decimal_str = ""
+ for digit in check_decimal[1]:
+ if digit.isdigit():
+ decimal_str += digit
+ else:
+ break
+ if len(decimal_str) > 6:
+ attr = attr.replace(decimal_str, decimal_str[0:6])
+
+ date_obj = isodate.parse_datetime(attr)
+ test_utc = date_obj.utctimetuple()
+ if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
+ raise OverflowError("Hit max or min date")
+ except (ValueError, OverflowError, AttributeError) as err:
+ msg = "Cannot deserialize datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
+
+ @staticmethod
+ def deserialize_unix(attr):
+ """Deserialize unix time (seconds since the epoch) into Datetime object.
+
+ :param int attr: Object to be deserialized.
+ :return: Deserialized datetime
+ :rtype: Datetime
+ :raises: DeserializationError if format invalid
+ """
+ if isinstance(attr, ET.Element):
+ attr = int(attr.text) # type: ignore
+ try:
+ attr = int(attr)
+ date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
+ except ValueError as err:
+ msg = "Cannot deserialize to unix datetime object."
+ raise DeserializationError(msg) from err
+ return date_obj
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py
new file mode 100644
index 000000000000..c438829bda41
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py
@@ -0,0 +1,17 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+ from . import models as _models
+AgentsApiResponseFormatOption = Union[
+ str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat"
+]
+MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"]
+AgentsApiToolChoiceOption = Union[str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"]
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_vendor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_vendor.py
new file mode 100644
index 000000000000..e6f010934827
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_vendor.py
@@ -0,0 +1,50 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- + +import json +from typing import Any, Dict, IO, List, Mapping, Optional, Tuple, Union + +from ._model_base import Model, SdkJSONEncoder + + +# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` +FileContent = Union[str, bytes, IO[str], IO[bytes]] + +FileType = Union[ + # file (or bytes) + FileContent, + # (filename, file (or bytes)) + Tuple[Optional[str], FileContent], + # (filename, file (or bytes), content_type) + Tuple[Optional[str], FileContent, Optional[str]], +] + + +def serialize_multipart_data_entry(data_entry: Any) -> Any: + if isinstance(data_entry, (list, tuple, dict, Model)): + return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) + return data_entry + + +def prepare_multipart_form_data( + body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] +) -> Tuple[List[FileType], Dict[str, Any]]: + files: List[FileType] = [] + data: Dict[str, Any] = {} + for multipart_field in multipart_fields: + multipart_entry = body.get(multipart_field) + if isinstance(multipart_entry, list): + files.extend([(multipart_field, e) for e in multipart_entry]) + elif multipart_entry: + files.append((multipart_field, multipart_entry)) + + for data_field in data_fields: + data_entry = body.get(data_field) + if data_entry: + data[data_field] = serialize_multipart_data_entry(data_entry) + + return files, data diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/__init__.py new file mode 100644 index 000000000000..d5beb6bf7f83 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from ._patch import * # pylint: disable=unused-wildcard-import
+
+from ._client import AIProjectClient # type: ignore
+
+try:
+ from ._patch import __all__ as _patch_all
+ from ._patch import *
+except ImportError:
+ _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+ "AIProjectClient",
+]
+__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
+
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py
new file mode 100644
index 000000000000..4608fc35e6b8
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py
@@ -0,0 +1,139 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any, Awaitable, TYPE_CHECKING
+from typing_extensions import Self
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from azure.core.rest import AsyncHttpResponse, HttpRequest
+
+from .._serialization import Deserializer, Serializer
+from ._configuration import AIProjectClientConfiguration
+from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
+
+if TYPE_CHECKING:
+ from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AIProjectClient:
+ """AIProjectClient.
+
+ :ivar agents: AgentsOperations operations
+ :vartype agents: azure.ai.projects.aio.operations.AgentsOperations
+ :ivar connections: ConnectionsOperations operations
+ :vartype connections: azure.ai.projects.aio.operations.ConnectionsOperations
+ :ivar evaluations: EvaluationsOperations operations
+ :vartype evaluations: azure.ai.projects.aio.operations.EvaluationsOperations
+ :param endpoint: The Azure AI Studio project endpoint, in the form
+ ``https://<azure-region>.api.azureml.ms`` or
+ ``https://<private-link-guid>.<azure-region>.api.azureml.ms``, where
+ :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+ :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+ :type endpoint: str
+ :param subscription_id: The Azure subscription ID. Required.
+ :type subscription_id: str
+ :param resource_group_name: The name of the Azure Resource Group. Required.
+ :type resource_group_name: str
+ :param project_name: The Azure AI Studio project name. Required.
+ :type project_name: str
+ :param credential: Credential used to authenticate requests to the service. Required.
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+ :keyword api_version: The API version to use for this operation. Default value is
+ "2024-07-01-preview". Note that overriding this default value may result in unsupported
+ behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long + self._config = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + **kwargs + ) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) + self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.AsyncHttpResponse
+ """
+
+ request_copy = deepcopy(request)
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+
+ request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
+ return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore
+
+ async def close(self) -> None:
+ await self._client.close()
+
+ async def __aenter__(self) -> Self:
+ await self._client.__aenter__()
+ return self
+
+ async def __aexit__(self, *exc_details: Any) -> None:
+ await self._client.__aexit__(*exc_details)
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_configuration.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_configuration.py
new file mode 100644
index 000000000000..184943d4f512
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_configuration.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline import policies
+
+from .._version import VERSION
+
+if TYPE_CHECKING:
+ from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AIProjectClientConfiguration: # pylint: disable=too-many-instance-attributes
+ """Configuration for AIProjectClient.
+
+ Note that all parameters used to create this instance are saved as instance
+ attributes.
+
+ :param endpoint: The Azure AI Studio project endpoint, in the form
+ ``https://<azure-region>.api.azureml.ms`` or
+ ``https://<private-link-guid>.<azure-region>.api.azureml.ms``, where
+ :code:`<azure-region>` is the Azure region where the project is deployed (e.g. westus) and
+ :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
+ :type endpoint: str
+ :param subscription_id: The Azure subscription ID. Required.
+ :type subscription_id: str
+ :param resource_group_name: The name of the Azure Resource Group. Required.
+ :type resource_group_name: str
+ :param project_name: The Azure AI Studio project name. Required.
+ :type project_name: str
+ :param credential: Credential used to authenticate requests to the service. Required.
+ :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+ :keyword api_version: The API version to use for this operation. Default value is
+ "2024-07-01-preview". Note that overriding this default value may result in unsupported
+ behavior.
+ :paramtype api_version: str + """ + + def __init__( + self, + endpoint: str, + subscription_id: str, + resource_group_name: str, + project_name: str, + credential: "AsyncTokenCredential", + **kwargs: Any + ) -> None: + api_version: str = kwargs.pop("api_version", "2024-07-01-preview") + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + if resource_group_name is None: + raise ValueError("Parameter 'resource_group_name' must not be None.") + if project_name is None: + raise ValueError("Parameter 'project_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + + self.endpoint = endpoint + self.subscription_id = subscription_id + self.resource_group_name = resource_group_name + self.project_name = project_name + self.credential = credential + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "ai-projects/{}".format(VERSION)) + self.polling_interval = kwargs.get("polling_interval", 30) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py new file mode 100644 index 000000000000..d1a7e6d84569 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -0,0 +1,200 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
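+
+Example of creating and using the customized client (a minimal sketch; the connection
+string shown is an illustrative placeholder, and ``DefaultAzureCredential`` is one
+possible credential type):
+
+.. code-block:: python
+
+ from azure.identity.aio import DefaultAzureCredential
+ from azure.ai.projects.aio import AIProjectClient
+
+ async with AIProjectClient.from_connection_string(
+ "<region>.api.azureml.ms;<subscription-id>;<resource-group>;<project-name>",
+ credential=DefaultAzureCredential(),
+ ) as project_client:
+ agents = project_client.agents # operation groups: agents, connections, evaluations, inference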
+
+This customization splits the project client across three pipeline clients, one per
+service endpoint (connections, agents, evaluations), each with its own base URL and
+credential scopes.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+from typing import List, Any, TYPE_CHECKING
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline import policies
+from typing_extensions import Self
+
+from .._serialization import Deserializer, Serializer
+from ._configuration import AIProjectClientConfiguration
+from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
+from ._client import AIProjectClient as ClientGenerated
+from .operations._patch import InferenceOperations
+
+if TYPE_CHECKING:
+ from azure.core.credentials_async import AsyncTokenCredential
+
+
+class AIProjectClient(ClientGenerated):
+
+ def __init__(
+ self,
+ endpoint: str,
+ subscription_id: str,
+ resource_group_name: str,
+ project_name: str,
+ credential: "AsyncTokenCredential",
+ **kwargs: Any,
+ ) -> None:
+ # TODO: Validate input formats with regex match (e.g. subscription ID)
+ if not endpoint:
+ raise ValueError("endpoint is required")
+ if not subscription_id:
+ raise ValueError("subscription_id is required")
+ if not resource_group_name:
+ raise ValueError("resource_group_name is required")
+ if not project_name:
+ raise ValueError("project_name is required")
+ if not credential:
+ raise ValueError("credential is required")
+ if "api_version" in kwargs:
+ raise ValueError("No support for overriding the API version")
+ if "credential_scopes" in kwargs:
+ raise ValueError("No support for overriding the credential scopes")
+
+ kwargs1 = kwargs.copy()
+ kwargs2 = kwargs.copy()
+ kwargs3 = kwargs.copy()
+
+ # For Endpoints operations (enumerating connections, getting SAS tokens)
+ _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long
+ self._config1 = AIProjectClientConfiguration(
+ endpoint=endpoint,
+ subscription_id=subscription_id,
+ resource_group_name=resource_group_name,
+ project_name=project_name,
+ credential=credential,
+ api_version="2024-07-01-preview",
+ credential_scopes=["https://management.azure.com"],
+ **kwargs1,
+ )
+ _policies1 = kwargs1.pop("policies", None)
+ if _policies1 is None:
+ _policies1 = [
+ policies.RequestIdPolicy(**kwargs1),
+ self._config1.headers_policy,
+ self._config1.user_agent_policy,
+ self._config1.proxy_policy,
+ policies.ContentDecodePolicy(**kwargs1),
+ self._config1.redirect_policy,
+ self._config1.retry_policy,
+ self._config1.authentication_policy,
+ self._config1.custom_hook_policy,
+ self._config1.logging_policy,
+ policies.DistributedTracingPolicy(**kwargs1),
+ policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None,
+ self._config1.http_logging_policy,
+ ]
+ self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1)
+
+ # For Agents operations
+ _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long
+ self._config2 = AIProjectClientConfiguration(
+ endpoint=endpoint,
+ subscription_id=subscription_id,
+ resource_group_name=resource_group_name,
+ project_name=project_name,
+ credential=credential,
+ api_version="2024-07-01-preview", # TODO: Update me
+ credential_scopes=["https://ml.azure.com"],
+ **kwargs2,
+ )
+ _policies2 = kwargs2.pop("policies", None)
+ if _policies2 is None:
+ _policies2 = [
+ policies.RequestIdPolicy(**kwargs2),
+ self._config2.headers_policy,
+ 
self._config2.user_agent_policy,
+                self._config2.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs2),
+                self._config2.redirect_policy,
+                self._config2.retry_policy,
+                self._config2.authentication_policy,
+                self._config2.custom_hook_policy,
+                self._config2.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs2),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None,
+                self._config2.http_logging_policy,
+            ]
+        self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2)
+
+        # For Cloud Evaluations operations
+        _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
+        self._config3 = AIProjectClientConfiguration(
+            endpoint=endpoint,
+            subscription_id=subscription_id,
+            resource_group_name=resource_group_name,
+            project_name=project_name,
+            credential=credential,
+            api_version="2024-07-01-preview",  # TODO: Update me
+            credential_scopes=["https://management.azure.com/.default"],  # TODO: Update once service changes are ready
+            **kwargs3,
+        )
+        _policies3 = kwargs3.pop("policies", None)
+        if _policies3 is None:
+            _policies3 = [
+                policies.RequestIdPolicy(**kwargs3),
+                self._config3.headers_policy,
+                self._config3.user_agent_policy,
+                self._config3.proxy_policy,
+                policies.ContentDecodePolicy(**kwargs3),
+                self._config3.redirect_policy,
+                self._config3.retry_policy,
+                self._config3.authentication_policy,
+                self._config3.custom_hook_policy,
+                self._config3.logging_policy,
+                policies.DistributedTracingPolicy(**kwargs3),
+                policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None,
+                self._config3.http_logging_policy,
+            ]
+        self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3)
+
+        self._serialize = Serializer()
+        self._deserialize = Deserializer()
+        self._serialize.client_side_validation = False
+
+        self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize)
+        self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize)
+        self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize)
+        self.inference = InferenceOperations(self)
+
+    async def close(self) -> None:
+        await self._client1.close()
+        await self._client2.close()
+        await self._client3.close()
+
+    async def __aenter__(self) -> Self:
+        await self._client1.__aenter__()
+        await self._client2.__aenter__()
+        await self._client3.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details: Any) -> None:
+        await self._client1.__aexit__(*exc_details)
+        await self._client2.__aexit__(*exc_details)
+        await self._client3.__aexit__(*exc_details)
+
+    @classmethod
+    def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AIProjectClient":
+        """
+        Create an asynchronous AIProjectClient from a connection string.
+
+        :param conn_str: The connection string, copied from your AI Studio project.
+        :type conn_str: str
+        :param credential: Credential used to authenticate requests to the service.
+        :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+        :return: An AIProjectClient instance.
+        :rtype: ~azure.ai.projects.aio.AIProjectClient
+        """
+        if not conn_str:
+            raise ValueError("Connection string is required")
+        parts = conn_str.split(";")
+        if len(parts) != 4:
+            raise ValueError(
+                "Invalid connection string format. Expected four semicolon-separated parts: "
+                "<endpoint-host>;<subscription_id>;<resource_group_name>;<project_name>"
+            )
+        endpoint = "https://" + parts[0]
+        subscription_id = parts[1]
+        resource_group_name = parts[2]
+        project_name = parts[3]
+        return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs)
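A minimal usage sketch of the two construction paths above (illustrative only: the endpoint host, subscription ID, resource group, and project name are placeholders, and DefaultAzureCredential from azure-identity stands in for any AsyncTokenCredential implementation):

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.ai.projects.aio import AIProjectClient


async def main() -> None:
    async with DefaultAzureCredential() as credential:
        # Path 1: explicit keyword arguments.
        async with AIProjectClient(
            endpoint="https://my-project.region.api.azureml.ms",  # placeholder
            subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
            resource_group_name="my-resource-group",
            project_name="my-project",
            credential=credential,
        ) as client:
            print(await client.agents.list_agents())

        # Path 2: the four-part connection string parsed by from_connection_string above.
        conn_str = (
            "my-project.region.api.azureml.ms;"
            "00000000-0000-0000-0000-000000000000;my-resource-group;my-project"
        )
        async with AIProjectClient.from_connection_string(conn_str, credential) as client:
            print(await client.agents.list_agents())


asyncio.run(main())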
+
+
+__all__: List[str] = [
+    "AIProjectClient",
+]  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py
new file mode 100644
index 000000000000..35cf92df96bc
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py
@@ -0,0 +1,29 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wrong-import-position
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ._patch import *  # pylint: disable=unused-wildcard-import
+
+from ._operations import AgentsOperations  # type: ignore
+from ._operations import ConnectionsOperations  # type: ignore
+from ._operations import EvaluationsOperations  # type: ignore
+
+from ._patch import __all__ as _patch_all
+from ._patch import *
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = [
+    "AgentsOperations",
+    "ConnectionsOperations",
+    "EvaluationsOperations",
+]
+__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
+_patch_sdk()
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py
new file mode 100644
index 000000000000..1279baa85261
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py
@@ -0,0 +1,6049 @@
+# pylint: disable=too-many-lines
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import _model_base, models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._vendor import FileType, prepare_multipart_form_data +from ...operations._operations import ( + build_agents_cancel_run_request, + build_agents_cancel_vector_store_file_batch_request, + build_agents_create_agent_request, + build_agents_create_message_request, + build_agents_create_run_request, + build_agents_create_thread_and_run_request, + build_agents_create_thread_request, + build_agents_create_vector_store_file_batch_request, + build_agents_create_vector_store_file_request, + build_agents_create_vector_store_request, + build_agents_delete_agent_request, + build_agents_delete_file_request, + build_agents_delete_thread_request, + build_agents_delete_vector_store_file_request, + build_agents_delete_vector_store_request, + build_agents_get_agent_request, + build_agents_get_file_content_request, + build_agents_get_file_request, + build_agents_get_message_request, + build_agents_get_run_request, + build_agents_get_run_step_request, + build_agents_get_thread_request, + build_agents_get_vector_store_file_batch_request, + build_agents_get_vector_store_file_request, + build_agents_get_vector_store_request, + build_agents_list_agents_request, + build_agents_list_files_request, + build_agents_list_messages_request, + build_agents_list_run_steps_request, + build_agents_list_runs_request, + build_agents_list_vector_store_file_batch_files_request, + build_agents_list_vector_store_files_request, + build_agents_list_vector_stores_request, + build_agents_modify_vector_store_request, + build_agents_submit_tool_outputs_to_run_request, + build_agents_update_agent_request, + build_agents_update_message_request, + build_agents_update_run_request, + build_agents_update_thread_request, + build_agents_upload_file_request, + build_connections_get_request, + build_connections_list_request, + build_connections_list_secrets_request, + build_evaluations_create_or_replace_schedule_request, + build_evaluations_create_request, + build_evaluations_delete_schedule_request, + build_evaluations_get_request, + build_evaluations_get_schedule_request, + build_evaluations_list_request, + build_evaluations_list_schedule_request, + build_evaluations_update_request, +) + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from ... 
import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. 
+ :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if 
response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.Agent, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
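A short sketch of calling the operation above through the patched client (the model deployment name, agent name, and instructions are placeholder values; the keyword form is shown, but the JSON-dict and IO[bytes] overloads documented above work the same way):

from azure.ai.projects.models import Agent


async def create_sample_agent(client) -> Agent:
    # Keyword form of create_agent; body may instead be passed as a dict or IO[bytes].
    agent = await client.agents.create_agent(
        model="gpt-4o",  # placeholder: your model deployment name
        name="sample-agent",
        instructions="You are a helpful assistant.",
        temperature=0.2,  # lower values give more focused, deterministic output
    )
    return agent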
+    @distributed_trace_async
+    async def list_agents(
+        self,
+        *,
+        limit: Optional[int] = None,
+        order: Optional[Union[str, _models.ListSortOrder]] = None,
+        after: Optional[str] = None,
+        before: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.OpenAIPageableListOfAgent:
+        """Gets a list of agents that were previously created.
+
+        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
+         100, and the default is 20. Default value is None.
+        :paramtype limit: int
+        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
+         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
+        :paramtype order: str or ~azure.ai.projects.models.ListSortOrder
+        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
+         in the list. For instance, if you make a list request and receive 100 objects, ending with
+         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
+         list. Default value is None.
+        :paramtype after: str
+        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
+         in the list. For instance, if you make a list request and receive 100 objects, ending with
+         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
+         the list. Default value is None.
+        :paramtype before: str
+        :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with
+         MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIPageableListOfAgent
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None)
+
+        _request = build_agents_list_agents_request(
+            limit=limit,
+            order=order,
+            after=after,
+            before=before,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @distributed_trace_async
+    async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent:
+        """Retrieves an existing agent.
+
+        :param assistant_id: Identifier of the agent. Required.
+        :type assistant_id: str
+        :return: Agent.
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. 
The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
The AgentThread is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.AgentThread
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_agents_create_thread_request(
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.AgentThread, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
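A sketch of seeding a new thread through the operation above (plain dicts stand in for ThreadMessageOptions models for brevity, since bodies are JSON-serialized as shown; the message text and metadata are placeholders):

async def start_demo_thread(client):
    # Create a thread with one initial user message attached.
    thread = await client.agents.create_thread(
        messages=[{"role": "user", "content": "Hello! What can you help me with?"}],
        metadata={"purpose": "demo"},  # up to 16 key/value pairs, per the docstring above
    )
    return thread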
+    @distributed_trace_async
+    async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread:
+        """Gets information about an existing thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :return: AgentThread. The AgentThread is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.AgentThread
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None)
+
+        _request = build_agents_get_thread_request(
+            thread_id=thread_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.AgentThread, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def update_thread(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentThread:
+        """Modifies an existing thread.
+
+        :param thread_id: The ID of the thread to modify. Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentThread. The AgentThread is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.AgentThread
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def update_thread(
+        self,
+        thread_id: str,
+        *,
+        content_type: str = "application/json",
+        tool_resources: Optional[_models.ToolResources] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.AgentThread:
+        """Modifies an existing thread.
+
+        :param thread_id: The ID of the thread to modify. Required.
+        :type thread_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword tool_resources: A set of resources that are made available to the agent's tools in
+         this thread. The resources are specific to the
+         type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while
+         the ``file_search`` tool requires
+         a list of vector store IDs.
Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.projects.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. + :paramtype attachments: list[~azure.ai.projects.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_message( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_message( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + role: Union[str, _models.MessageRole] = _Unset, + content: str = _Unset, + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.projects.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. 
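+
+        For example, a plain-text user message can be added with the keyword form (sketch; ``project_client`` and ``thread`` as in the earlier examples)::
+
+            message = await project_client.agents.create_message(
+                thread_id=thread.id,
+                role="user",
+                content="Summarize the attached report in three bullet points.",
+            )
+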
+ :paramtype attachments: list[~azure.ai.projects.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of 
messages that exist on a thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: 
ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. 
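+
+        By way of illustration, listing the most recent messages and re-fetching one by ID might look like this (sketch; the ``data`` attribute is an assumption based on the OpenAI-style pageable shape of ``OpenAIPageableListOfThreadMessage``)::
+
+            page = await project_client.agents.list_messages(
+                thread_id=thread.id, limit=20, order="desc"
+            )
+            newest = page.data[0]
+            message = await project_client.agents.get_message(
+                thread_id=thread.id, message_id=newest.id
+            )
+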
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
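+
+        For instance, metadata can be attached to a message after the fact (sketch)::
+
+            message = await project_client.agents.update_message(
+                thread_id=thread.id,
+                message_id=message.id,
+                metadata={"reviewed": "true"},  # string values, max 512 chars each
+            )
+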
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun.
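+
+        A sketch of the keyword form with per-run overrides (``agent`` is assumed to come from an earlier ``create_agent`` call)::
+
+            run = await project_client.agents.create_run(
+                thread_id=thread.id,
+                assistant_id=agent.id,
+                temperature=0.2,           # more deterministic output
+                max_completion_tokens=512,
+            )
+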
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun.
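+
+        Because ``create_run`` returns while the run is still executing, callers typically poll ``get_run`` until a terminal state is reached. A self-contained sketch (the status names are assumptions based on the run lifecycle described in these docstrings)::
+
+            import asyncio
+
+            async def wait_for_run(project_client, thread_id: str, run_id: str):
+                run = await project_client.agents.get_run(thread_id=thread_id, run_id=run_id)
+                # Tuple membership compares by equality, so plain strings and
+                # string-valued enums both match.
+                while run.status in ("queued", "in_progress", "cancelling"):
+                    await asyncio.sleep(1)  # fixed backoff keeps the sketch simple
+                    run = await project_client.agents.get_run(thread_id=thread_id, run_id=run_id)
+                # A ``requires_action`` result needs tool outputs; see
+                # ``submit_tool_outputs_to_run`` below.
+                return run
+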
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. 
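+
+        Cursor paging can be driven with ``after`` as described below; a sketch (``has_more``/``last_id`` are assumptions based on the OpenAI-style pageable shape)::
+
+            page = await project_client.agents.list_runs(thread_id=thread.id, limit=50)
+            runs = list(page.data)
+            while page.has_more:
+                page = await project_client.agents.list_runs(
+                    thread_id=thread.id, limit=50, after=page.last_id
+                )
+                runs.extend(page.data)
+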
+ :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. 
+ + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
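+
+        For example, a finished run can be tagged for later analysis (sketch)::
+
+            run = await project_client.agents.update_run(
+                thread_id=thread.id,
+                run_id=run.id,
+                metadata={"evaluation": "pending"},
+            )
+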
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
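+
+        A sketch of the tool-output round trip (``run_my_tool`` is a hypothetical local dispatcher, and the ``required_action.submit_tool_outputs.tool_calls`` path is an assumption based on the 'requires_action' shape described above)::
+
+            from azure.ai.projects.models import ToolOutput
+
+            calls = run.required_action.submit_tool_outputs.tool_calls
+            outputs = [
+                ToolOutput(tool_call_id=call.id, output=run_my_tool(call)) for call in calls
+            ]
+            run = await project_client.agents.submit_tool_outputs_to_run(
+                thread_id=thread.id, run_id=run.id, tool_outputs=outputs
+            )
+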
+ :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels an in-progress run on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun.
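+
+        For instance (sketch)::
+
+            run = await project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+            print(run.status)  # expected to move through ``cancelling`` to ``cancelled``
+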
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
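+
+        A sketch of the combined call, which avoids a separate thread-creation round trip (``agent`` as in the earlier examples)::
+
+            run = await project_client.agents.create_thread_and_run(
+                assistant_id=agent.id,
+                instructions="Answer concisely.",
+            )
+            print(run.thread_id)  # identifier of the newly created thread
+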
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only
+         the number of completion tokens specified, across multiple turns of the run. If the run
+         exceeds the number of completion tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode
+         or ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_thread_and_run(
+        self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Creates a new agent thread and immediately starts a run using that new thread.
+
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only
+         the number of completion tokens specified, across multiple turns of the run. If the run
+         exceeds the number of completion tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode
+         or ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if assistant_id is _Unset:
+                raise TypeError("missing required argument: assistant_id")
+            body = {
+                "assistant_id": assistant_id,
+                "instructions": instructions,
+                "max_completion_tokens": max_completion_tokens,
+                "max_prompt_tokens": max_prompt_tokens,
+                "metadata": metadata,
+                "model": model,
+                "response_format": response_format,
+                "stream": stream_parameter,
+                "temperature": temperature,
+                "thread": thread,
+                "tool_choice": tool_choice,
+                "tool_resources": tool_resources,
+                "tools": tools,
+                "top_p": top_p,
+                "truncation_strategy": truncation_strategy,
+            }
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_agents_create_thread_and_run_request(
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.ThreadRun, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
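+
+    # Usage sketch (illustrative comment, not part of the generated surface):
+    # assuming an authenticated async client whose agents operation group is
+    # exposed as ``client.agents``, a thread can be created and run in a
+    # single call using only the keyword arguments documented above:
+    #
+    #     run = await client.agents.create_thread_and_run(
+    #         assistant_id="<agent-id>",
+    #         instructions="You are a helpful agent.",
+    #     )
+    #     print(run.id, run.status)
+
+    @distributed_trace_async
+    async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep:
+        """Gets a single run step from a thread run.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param run_id: Identifier of the run. Required.
+        :type run_id: str
+        :param step_id: Identifier of the run step. Required.
+        :type step_id: str
+        :return: RunStep.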
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with
+         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
+         the list. Default value is None.
+        :paramtype before: str
+        :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with
+         MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIPageableListOfRunStep
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None)
+
+        _request = build_agents_list_run_steps_request(
+            thread_id=thread_id,
+            run_id=run_id,
+            limit=limit,
+            order=order,
+            after=after,
+            before=before,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
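+
+    # Usage sketch (illustrative comment, not generated code): cursor-based
+    # pagination over run steps using the ``after`` parameter documented
+    # above. ``client.agents`` is an assumed handle to this operation group,
+    # and ``has_more``/``last_id`` are assumed fields of the pageable list
+    # model.
+    #
+    #     steps = await client.agents.list_run_steps(thread_id, run_id, limit=20)
+    #     while steps.has_more:
+    #         steps = await client.agents.list_run_steps(
+    #             thread_id, run_id, limit=20, after=steps.last_id,
+    #         )
+
+    @distributed_trace_async
+    async def list_files(
+        self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any
+    ) -> _models.FileListResponse:
+        """Gets a list of previously uploaded files.
+
+        :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results",
+         "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is
+         None.
+        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+        :return: FileListResponse.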
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file( + self, + body: JSON = _Unset, + *, + file: FileType = _Unset, + purpose: Union[str, _models.FilePurpose] = _Unset, + filename: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Is one of the following types: JSON Required. + :type body: JSON + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file is _Unset: + raise TypeError("missing required argument: file") + if purpose is _Unset: + raise TypeError("missing required argument: purpose") + body = {"file": file, "filename": filename, "purpose": purpose} + body = {k: v for k, v in body.items() if v is not None} + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_agents_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = 
response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
+
+        _request = build_agents_get_file_request(
+            file_id=file_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.OpenAIFile, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
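+
+    # Usage sketch (illustrative comment): round-tripping a file through the
+    # operations above. The ``"assistants"`` purpose is one of the known
+    # values documented for ``upload_file``; ``client.agents`` and the ``id``
+    # field on the returned OpenAIFile are assumptions for this sketch.
+    #
+    #     uploaded = await client.agents.upload_file(
+    #         file=open("data.txt", "rb"), purpose="assistants")
+    #     info = await client.agents.get_file(uploaded.id)
+    #     await client.agents.delete_file(uploaded.id)
+
+    @distributed_trace_async
+    async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
+        """Returns the content of a previously uploaded file.
+
+        :param file_id: The ID of the file to retrieve. Required.
+        :type file_id: str
+        :return: FileContentResponse.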
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. 
+ :paramtype before: str + :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. 
Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+        _request = build_agents_get_vector_store_request(
+            vector_store_id=vector_store_id,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.VectorStore, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+    @overload
+    async def modify_vector_store(
+        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def modify_vector_store(
+        self,
+        vector_store_id: str,
+        *,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def modify_vector_store(
+        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def modify_vector_store(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> _models.VectorStore:
+        """Modifies an existing vector store.
+
+        :param vector_store_id: Identifier of the vector store to modify. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or a IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            body = {"expires_after": expires_after, "metadata": metadata, "name": name}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_agents_modify_vector_store_request(
+            vector_store_id=vector_store_id,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.VectorStore, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
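+
+    # Usage sketch (illustrative comment): creating, renaming, and deleting a
+    # vector store with the operations in this group, using only keyword
+    # arguments documented above. ``client.agents`` is an assumed handle to
+    # this operation group, and ``id`` is an assumed field of the returned
+    # VectorStore.
+    #
+    #     store = await client.agents.create_vector_store(name="docs")
+    #     store = await client.agents.modify_vector_store(store.id, name="docs-v2")
+    #     await client.agents.delete_vector_store(store.id)
+
+    @distributed_trace_async
+    async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus:
+        """Deletes the vector store object matching the specified ID.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :return: VectorStoreDeletionStatus.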
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.projects.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, + vector_store_id: str, + *, + file_id: str, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: str = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.projects.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`connections` attribute. 
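+
+    A minimal usage sketch (illustrative only, not generated code: the endpoint,
+    subscription, resource group, and project values are placeholders, and the
+    constructor arguments are assumed to mirror this client's configuration fields):
+
+    .. code-block:: python
+
+        from azure.identity.aio import DefaultAzureCredential
+        from azure.ai.projects.aio import AIProjectClient
+
+        async with AIProjectClient(
+            endpoint="<endpoint>",
+            subscription_id="<subscription-id>",
+            resource_group_name="<resource-group>",
+            project_name="<project-name>",
+            credential=DefaultAzureCredential(),
+        ) as project_client:
+            # All connection operations hang off this attribute.
+            connections = project_client.connections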
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def _list( + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.projects.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str + :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def _get(self, connection_name: str, **kwargs: Any) -> 
_models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def _list_secrets( + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + async def _list_secrets( + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + async def _list_secrets( + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + + @distributed_trace_async + async def _list_secrets( + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credentials (if available). + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword ignored: The body is ignored. 
TODO: Can we remove this? Required.
+        :paramtype ignored: str
+        :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with
+         MutableMapping
+        :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None)
+
+        if body is _Unset:
+            if ignored is _Unset:
+                raise TypeError("missing required argument: ignored")
+            body = {"ignored": ignored}
+            body = {k: v for k, v in body.items() if v is not None}
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(body, (IOBase, bytes)):
+            _content = body
+        else:
+            _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_connections_list_secrets_request(
+            connection_name=connection_name,
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(
+                _models._models.ConnectionsListSecretsResponse, response.json()  # pylint: disable=protected-access
+            )
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
+
+
+class EvaluationsOperations:
+    """
+    .. warning::
+        **DO NOT** instantiate this class directly.
+
+    Instead, you should access the following operations through
+    :class:`~azure.ai.projects.aio.AIProjectClient`'s
+    :attr:`evaluations` attribute.
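+
+    A minimal usage sketch (illustrative only; assumes ``project_client`` is an
+    already-constructed ``azure.ai.projects.aio.AIProjectClient`` and uses a
+    placeholder evaluation id):
+
+    .. code-block:: python
+
+        # Read one evaluation run by its identifier.
+        evaluation = await project_client.evaluations.get("<evaluation-id>")
+
+        # Page through existing evaluation runs.
+        async for item in project_client.evaluations.list(top=10):
+            print(item)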
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: ~azure.ai.projects.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. 
The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create(
+        self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Evaluation:
+        """Run the evaluation.
+
+        :param evaluation: Evaluation to run. Required.
+        :type evaluation: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create(
+        self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Evaluation:
+        """Run the evaluation.
+
+        :param evaluation: Evaluation to run. Required.
+        :type evaluation: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation:
+        """Run the evaluation.
+
+        :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON,
+         IO[bytes]. Required.
+        :type evaluation: ~azure.ai.projects.models.Evaluation or JSON or IO[bytes]
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
+        _params = kwargs.pop("params", {}) or {}
+
+        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
+        cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None)
+
+        content_type = content_type or "application/json"
+        _content = None
+        if isinstance(evaluation, (IOBase, bytes)):
+            _content = evaluation
+        else:
+            _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True)  # type: ignore
+
+        _request = build_evaluations_create_request(
+            content_type=content_type,
+            api_version=self._config.api_version,
+            content=_content,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response =
pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.Evaluation"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = 
pipeline_response.http_response.json()
+            list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"])
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.get("nextLink") or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response)
+
+            return pipeline_response
+
+        return AsyncItemPaged(get_next, extract_data)
+
+    @overload
+    async def update(
+        self,
+        id: str,
+        resource: _models.Evaluation,
+        *,
+        content_type: str = "application/merge-patch+json",
+        **kwargs: Any
+    ) -> _models.Evaluation:
+        """Resource update operation template.
+
+        :param id: Identifier of the evaluation. Required.
+        :type id: str
+        :param resource: The resource instance. Required.
+        :type resource: ~azure.ai.projects.models.Evaluation
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/merge-patch+json".
+        :paramtype content_type: str
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def update(
+        self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any
+    ) -> _models.Evaluation:
+        """Resource update operation template.
+
+        :param id: Identifier of the evaluation. Required.
+        :type id: str
+        :param resource: The resource instance. Required.
+        :type resource: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/merge-patch+json".
+        :paramtype content_type: str
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def update(
+        self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any
+    ) -> _models.Evaluation:
+        """Resource update operation template.
+
+        :param id: Identifier of the evaluation. Required.
+        :type id: str
+        :param resource: The resource instance. Required.
+        :type resource: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/merge-patch+json".
+        :paramtype content_type: str
+        :return: Evaluation. The Evaluation is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Evaluation
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def update(
+        self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any
+    ) -> _models.Evaluation:
+        """Resource update operation template.
+
+        :param id: Identifier of the evaluation. Required.
+        :type id: str
+        :param resource: The resource instance. Is one of the following types: Evaluation, JSON,
+         IO[bytes]. Required.
+        :type resource: ~azure.ai.projects.models.Evaluation or JSON or IO[bytes]
+        :return: Evaluation.
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_update_request( + id=id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + async def create_or_replace_schedule( + self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.projects.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_replace_schedule( + self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. 
+ :type resource: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_replace_schedule( + self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_replace_schedule( + self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.projects.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_or_replace_schedule_request( + name=name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedule( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> AsyncIterable["_models.EvaluationSchedule"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. 
+ :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.projects.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @distributed_trace_async + async def delete_schedule(self, name: str, **kwargs: Any) -> None: + """Resource delete operation template. 
+
+        :param name: Name of the schedule, which also serves as the unique identifier for the
+         evaluation. Required.
+        :type name: str
+        :return: None
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_evaluations_delete_schedule_request(
+            name=name,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = False
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py
new file mode 100644
index 000000000000..7ef50e724fba
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py
@@ -0,0 +1,1977 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import asyncio
+import io
+import logging
+import os
+import time
+from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload
+
+from azure.ai.projects import _types
+from ..._vendor import FileType
+from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
+from ._operations import AgentsOperations as AgentsOperationsGenerated
+from ...models._patch import ConnectionProperties
+from ...models._enums import AuthenticationType, ConnectionType, FilePurpose
+from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse
+from ... import models as _models
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+logger = logging.getLogger(__name__)
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+_Unset: Any = object()
+
+
+class InferenceOperations:
+
+    def __init__(self, outer_instance):
+        self.outer_instance = outer_instance
+
+    @distributed_trace_async
+    async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
+        """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Chat Completions AI model deployment.
+        The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
+
+        :return: An authenticated chat completions client
+        :rtype: ~azure.ai.inference.aio.ChatCompletionsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = await self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference.aio import ChatCompletionsClient
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            ) from e
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
+            )
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
+            )
+            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
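+
+    # Usage sketch (not part of the patch; client and attribute names are
+    # illustrative, assuming the enclosing project client exposes this class as
+    # `.inference` and a Serverless chat deployment exists):
+    #
+    #     chat_client = await project_client.inference.get_chat_completions_client()
+    #     response = await chat_client.complete(
+    #         messages=[{"role": "user", "content": "How many feet are in a mile?"}]
+    #     )
+    #     print(response.choices[0].message.content)
+    #     await chat_client.close()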
+
+    @distributed_trace_async
+    async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
+        """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
+        The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
+
+        :return: An authenticated embeddings client
+        :rtype: ~azure.ai.inference.aio.EmbeddingsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = await self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference.aio import EmbeddingsClient
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            ) from e
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
+            )
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
+            )
+            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
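+
+    # Usage sketch (illustrative only, same assumptions as the chat example above):
+    #
+    #     embeddings_client = await project_client.inference.get_embeddings_client()
+    #     result = await embeddings_client.embed(input=["first phrase", "second phrase"])
+    #     for item in result.data:
+    #         print(len(item.embedding))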
+
+    @distributed_trace_async
+    async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
+        """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default
+        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
+
+        :return: An authenticated AsyncAzureOpenAI client
+        :rtype: ~openai.AsyncAzureOpenAI
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = await self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No Azure OpenAI connection found.")
+
+        try:
+            from openai import AsyncAzureOpenAI
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") from e
+
+        # Pick latest GA version from the "Data plane - Inference" row in the table
+        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        AZURE_OPENAI_API_VERSION = "2024-06-01"
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
+            )
+            client = AsyncAzureOpenAI(
+                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
+            )
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as e:
+                raise ModuleNotFoundError(
+                    "azure-identity package not installed. Please install it using 'pip install azure-identity'"
+                ) from e
+            client = AsyncAzureOpenAI(
+                # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
+            # Import the token provider in this branch too, so the SAS path does not
+            # depend on the AAD branch (and its local import) having run first.
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as e:
+                raise ModuleNotFoundError(
+                    "azure-identity package not installed. Please install it using 'pip install azure-identity'"
+                ) from e
+            client = AsyncAzureOpenAI(
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
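+
+    # Usage sketch (illustrative; the deployment name below is a placeholder, not
+    # part of this SDK):
+    #
+    #     aoai_client = await project_client.inference.get_azure_openai_client()
+    #     response = await aoai_client.chat.completions.create(
+    #         model="my-gpt4o-deployment",
+    #         messages=[{"role": "user", "content": "Hello"}],
+    #     )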
+
+
+class ConnectionsOperations(ConnectionsOperationsGenerated):
+
+    @distributed_trace_async
+    async def get_default(
+        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
+    ) -> Optional[ConnectionProperties]:
+        """Get the properties of the default connection of a certain connection type, with or without
+        populating authentication credentials.
+
+        :keyword connection_type: The connection type. Required.
+        :paramtype connection_type: ~azure.ai.projects.models._models.ConnectionType
+        :keyword with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+        :paramtype with_credentials: bool
+        :return: The connection properties, or None if no connection of the given type exists
+        :rtype: ~azure.ai.projects.models._models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_type:
+            raise ValueError("You must specify a connection type")
+        # Since there is no notion of default connection at the moment, list all connections in the category
+        # and return the first one
+        connection_properties_list = await self.list(connection_type=connection_type, **kwargs)
+        if len(connection_properties_list) > 0:
+            if with_credentials:
+                return await self.get(
+                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
+                )
+            else:
+                return connection_properties_list[0]
+        else:
+            return None
+
+    @distributed_trace_async
+    async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
+        """Get the properties of a single connection, given its connection name, with or without
+        populating authentication credentials.
+
+        :keyword connection_name: Connection Name. Required.
+        :paramtype connection_name: str
+        :keyword with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
+        :paramtype with_credentials: bool
+        :return: The connection properties
+        :rtype: ~azure.ai.projects.models._models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_name:
+            raise ValueError("Connection name cannot be empty")
+        if with_credentials:
+            connection: ConnectionsListSecretsResponse = await self._list_secrets(
+                connection_name=connection_name, ignored="ignore", **kwargs
+            )
+            if connection.properties.auth_type == AuthenticationType.AAD:
+                return ConnectionProperties(connection=connection, token_credential=self._config.credential)
+            elif connection.properties.auth_type == AuthenticationType.SAS:
+                from ...models._patch import SASTokenCredential
+
+                token_credential = SASTokenCredential(
+                    sas_token=connection.properties.credentials.sas,
+                    credential=self._config.credential,
+                    subscription_id=self._config.subscription_id,
+                    resource_group_name=self._config.resource_group_name,
+                    project_name=self._config.project_name,
+                    connection_name=connection_name,
+                )
+                return ConnectionProperties(connection=connection, token_credential=token_credential)
+
+            return ConnectionProperties(connection=connection)
+        else:
+            return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs))
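+
+    # Usage sketch (illustrative): fetching the default Azure OpenAI connection,
+    # including its credentials, through these patched operations:
+    #
+    #     connection = await project_client.connections.get_default(
+    #         connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True
+    #     )
+    #     if connection:
+    #         print(connection.name)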
+
+    @distributed_trace_async
+    async def list(
+        self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any
+    ) -> Iterable[ConnectionProperties]:
+        """List the properties of all connections, or all connections of a certain connection type.
+
+        :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this type.
+         If not provided, all connections are listed.
+        :paramtype connection_type: ~azure.ai.projects.models._models.ConnectionType
+        :return: A list of connection properties
+        :rtype: Iterable[~azure.ai.projects.models._models.ConnectionProperties]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connections_list: ConnectionsListResponse = await self._list(
+            include_all=True, category=connection_type, **kwargs
+        )
+
+        # Iterate to create the simplified result property
+        connection_properties_list: List[ConnectionProperties] = []
+        for connection in connections_list.value:
+            connection_properties_list.append(ConnectionProperties(connection=connection))
+
+        return connection_properties_list
+
+
+class AgentsOperations(AgentsOperationsGenerated):
+
+    @overload
+    async def create_agent(
+        self, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Agent:
+        """Creates a new agent.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Agent. The Agent is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Agent
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_agent(
+        self,
+        *,
+        model: str,
+        content_type: str = "application/json",
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        instructions: Optional[str] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        tool_resources: Optional[_models.ToolResources] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.Agent:
+        """Creates a new agent.
+
+        :keyword model: The ID of the model to use. Required.
+        :paramtype model: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword name: The name of the new agent. Default value is None.
+        :paramtype name: str
+        :keyword description: The description of the new agent. Default value is None.
+        :paramtype description: str
+        :keyword instructions: The system instructions for the new agent to use. Default value is None.
+        :paramtype instructions: str
+        :keyword tools: The collection of tools to enable for the new agent. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
+         are specific to the type of tool. For example, the ``code_interpreter``
+         tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector
+         store IDs. Default value is None.
+        :paramtype tool_resources: ~azure.ai.projects.models.ToolResources
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random,
+         while lower values like 0.2 will make it more focused and deterministic. Default value is
+         None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass.
+         So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_agent( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.AsyncToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. + :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. + :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, io.IOBase): + return await super().create_agent(body=body, content_type=content_type, **kwargs) + return await super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return await super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.AsyncToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. 
+ :rtype: ~azure.ai.projects.models.AsyncToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + async def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. 
Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_run( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. 
The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + if isinstance(body, dict): # Handle overload with JSON body. + content_type = kwargs.get("content_type", "application/json") + response = super().create_run(thread_id, body, content_type=content_type, **kwargs) + + elif assistant_id is not _Unset: # Handle overload with keyword arguments. + response = super().create_run( + thread_id, + assistant_id=assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + tools=tools, + stream_parameter=False, + stream=False, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + truncation_strategy=truncation_strategy, + tool_choice=tool_choice, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + elif isinstance(body, io.IOBase): # Handle overload with binary body. 
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # All branches above invoke the generated non-streaming operation
+        # (stream=False), so awaiting the coroutine yields the created ThreadRun.
+        return await response
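+
+    # Usage sketch (illustrative): create_run returns as soon as the run is queued;
+    # callers either poll get_run until a terminal state is reached, or use
+    # create_and_process_run below, which wraps the polling and tool execution:
+    #
+    #     run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+    #     while run.status in ["queued", "in_progress"]:
+    #         await asyncio.sleep(1)
+    #         run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)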
+
+    @distributed_trace_async
+    async def create_and_process_run(
+        self,
+        thread_id: str,
+        assistant_id: str,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: int = 1,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread and processes the run.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior
+         on a per-run basis without overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output
+         more random, while lower values like 0.2 will make it more focused and deterministic. Default
+         value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model
+         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+         comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only
+         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
+         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice Default value is None.
+        :paramtype tool_choice: str or str or
+         ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat Default value is None.
+        :paramtype response_format: str or str or
+         ~azure.ai.projects.models.AgentsApiResponseFormatMode or
+         ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
+         Default value is 1.
+        :paramtype sleep_interval: int
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Create and initiate the run with additional parameters
+        run = await self.create_run(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            tools=tools,
+            temperature=temperature,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            truncation_strategy=truncation_strategy,
+            tool_choice=tool_choice,
+            response_format=response_format,
+            metadata=metadata,
+            **kwargs,
+        )
+
+        # Monitor and process the run status. Use asyncio.sleep (not time.sleep)
+        # so polling does not block the event loop.
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            await asyncio.sleep(sleep_interval)
+            run = await self.get_run(thread_id=thread_id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    logger.warning("No tool calls provided - cancelling run")
+                    await self.cancel_run(thread_id=thread_id, run_id=run.id)
+                    break
+
+                toolset = self.get_toolset()
+                if toolset:
+                    tool_outputs = await toolset.execute_tool_calls(tool_calls)
+                else:
+                    raise ValueError("Toolset is not available in the client.")
+
+                logger.info("Tool outputs: %s", tool_outputs)
+                if tool_outputs:
+                    await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
+
+            logger.info("Current run status: %s", run.status)
+
+        return run
+
+    @overload
+    async def create_stream(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AsyncAgentRunStream:
+        """Creates a new stream for an agent thread, terminating when the run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    async def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AsyncAgentRunStream:
+        """Creates a new stream for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort + to use only the number of completion tokens specified, across multiple turns of the run. If + the run exceeds the number of + completion tokens specified, the run will end with status ``incomplete``. See + ``incomplete_details`` for more info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context windows + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice Default value is None. + :paramtype tool_choice: str or str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or + ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. 
+ @distributed_trace_async
+ async def create_stream(
+ self,
+ thread_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ assistant_id: str = _Unset,
+ model: Optional[str] = None,
+ instructions: Optional[str] = None,
+ additional_instructions: Optional[str] = None,
+ additional_messages: Optional[List[_models.ThreadMessage]] = None,
+ tools: Optional[List[_models.ToolDefinition]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ max_prompt_tokens: Optional[int] = None,
+ max_completion_tokens: Optional[int] = None,
+ truncation_strategy: Optional[_models.TruncationObject] = None,
+ tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+ response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AsyncAgentRunStream:
+ """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword assistant_id: The ID of the agent that should run the thread. Required.
+ :paramtype assistant_id: str
+ :keyword model: The overridden model name that the agent should use to run the thread. Default
+ value is None.
+ :paramtype model: str
+ :keyword instructions: The overridden system instructions that the agent should use to run the
+ thread. Default value is None.
+ :paramtype instructions: str
+ :keyword additional_instructions: Additional instructions to append at the end of the
+ instructions for the run. This is useful for modifying the behavior
+ on a per-run basis without overriding other instructions. Default value is None.
+ :paramtype additional_instructions: str
+ :keyword additional_messages: Adds additional messages to the thread before creating the run.
+ Default value is None.
+ :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+ :keyword tools: The overridden list of enabled tools that the agent should use to run the
+ thread. Default value is None.
+ :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+ :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+ will make the output
+ more random, while lower values like 0.2 will make it more focused and deterministic. Default
+ value is None.
+ :paramtype temperature: float
+ :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+ model
+ considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+ comprising the top 10% probability mass are considered.
+
+ We generally recommend altering this or temperature but not both. Default value is None.
+ :paramtype top_p: float
+ :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+ course of the run. The run will make a best effort to use only
+ the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
+ the number of prompt tokens specified,
+ the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+ value is None.
+ :paramtype max_prompt_tokens: int
+ :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+ the course of the run. The run will make a best effort
+ to use only the number of completion tokens specified, across multiple turns of the run. If
+ the run exceeds the number of
+ completion tokens specified, the run will end with status ``incomplete``. See
+ ``incomplete_details`` for more info. Default value is None.
+ :paramtype max_completion_tokens: int
+ :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Default value is None.
+ :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+ :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of
+ the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice. Default value is None.
+ :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.projects.models.AgentsNamedToolChoice
+ :keyword response_format: Specifies the format that the model must output. Is one of the
+ following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat. Default value is None.
+ :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+ or ~azure.ai.projects.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
+ :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with AsyncIterable and supports streaming.
+ :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict): # Handle overload with JSON body.
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+ elif assistant_id is not _Unset: # Handle overload with keyword arguments.
+ response = super().create_run(
+ thread_id,
+ assistant_id=assistant_id,
+ model=model,
+ instructions=instructions,
+ additional_instructions=additional_instructions,
+ additional_messages=additional_messages,
+ tools=tools,
+ stream_parameter=True,
+ stream=True,
+ temperature=temperature,
+ top_p=top_p,
+ max_prompt_tokens=max_prompt_tokens,
+ max_completion_tokens=max_completion_tokens,
+ truncation_strategy=truncation_strategy,
+ tool_choice=tool_choice,
+ response_format=response_format,
+ metadata=metadata,
+ **kwargs,
+ )
+
+ elif isinstance(body, io.IOBase): # Handle overload with binary body.
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)
+
+ return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
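+
+ # Consumption sketch (illustration only): elsewhere in this module the stream
+ # is entered as an async context manager and drained with ``until_done()``.
+ # ``client`` and ``thread`` are hypothetical names.
+ #
+ # async with await client.create_stream(
+ # thread_id=thread.id, assistant_id="<agent-id>"
+ # ) as stream:
+ # await stream.until_done()
+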
+ @overload
+ async def submit_tool_outputs_to_run(
+ self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ *,
+ tool_outputs: List[_models.ToolOutput],
+ content_type: str = "application/json",
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def submit_tool_outputs_to_run(
+ self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ @distributed_trace_async
+ async def submit_tool_outputs_to_run(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.ThreadRun:
+ """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
+ :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.ThreadRun
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ elif tool_outputs is not _Unset:
+ response = super().submit_tool_outputs_to_run(
+ thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
+ )
+
+ elif isinstance(body, io.IOBase):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ # Non-streaming call (stream=False above); awaiting resolves to the updated ThreadRun.
+ return await response
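+
+ # Sketch of a manual tool-output round trip (illustration only; assumes the
+ # ``ToolOutput`` model carries ``tool_call_id`` and ``output`` fields):
+ #
+ # if isinstance(run.required_action, _models.SubmitToolOutputsAction):
+ # outputs = [
+ # _models.ToolOutput(tool_call_id=call.id, output="42")
+ # for call in run.required_action.submit_tool_outputs.tool_calls
+ # ]
+ # run = await client.submit_tool_outputs_to_run(
+ # thread_id=run.thread_id, run_id=run.id, tool_outputs=outputs
+ # )
+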
+ @overload
+ async def submit_tool_outputs_to_stream(
+ self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AsyncAgentRunStream:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with AsyncIterable and supports streaming.
+ :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def submit_tool_outputs_to_stream(
+ self,
+ thread_id: str,
+ run_id: str,
+ *,
+ tool_outputs: List[_models.ToolOutput],
+ content_type: str = "application/json",
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AsyncAgentRunStream:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
+ :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with AsyncIterable and supports streaming.
+ :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def submit_tool_outputs_to_stream(
+ self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.AsyncAgentRunStream:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with AsyncIterable and supports streaming.
+ :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
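+
+ # Note the division of labor: ``submit_tool_outputs_to_run`` (above) posts tool
+ # outputs with ``stream=False`` and resolves to the updated ThreadRun, while
+ # ``submit_tool_outputs_to_stream`` (below) posts the same payload with
+ # ``stream=True`` and wraps the resulting SSE byte stream in an
+ # AsyncAgentRunStream.
+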
+ @distributed_trace_async
+ async def submit_tool_outputs_to_stream(
+ self,
+ thread_id: str,
+ run_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ tool_outputs: List[_models.ToolOutput] = _Unset,
+ event_handler: Optional[_models.AsyncAgentEventHandler] = None,
+ **kwargs: Any,
+ ) -> _models.AsyncAgentRunStream:
+ """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+ outputs will have a status of 'requires_action' with a required_action.type of
+ 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+ :param thread_id: Required.
+ :type thread_id: str
+ :param run_id: Required.
+ :type run_id: str
+ :param body: Is either a JSON type or an IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword tool_outputs: Required.
+ :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+ :keyword event_handler: The event handler to use for processing events during the run. Default
+ value is None.
+ :paramtype event_handler: ~azure.ai.projects.models.AsyncAgentEventHandler
+ :return: AsyncAgentRunStream. AsyncAgentRunStream is compatible with AsyncIterable and supports streaming.
+ :rtype: ~azure.ai.projects.models.AsyncAgentRunStream
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if isinstance(body, dict):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ elif tool_outputs is not _Unset:
+ response = super().submit_tool_outputs_to_run(
+ thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
+ )
+
+ elif isinstance(body, io.IOBase):
+ content_type = kwargs.get("content_type", "application/json")
+ response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+ else:
+ raise ValueError("Invalid combination of arguments provided.")
+
+ # Cast the response to AsyncIterator[bytes] for type correctness
+ response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)
+
+ return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
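+
+ # Consumption sketch (illustration only): the returned stream is documented as
+ # compatible with AsyncIterable, so under that assumption it can be drained
+ # with ``until_done()``. ``client``, ``tid``, ``rid`` and ``outputs`` are
+ # hypothetical names.
+ #
+ # async with await client.submit_tool_outputs_to_stream(
+ # thread_id=tid, run_id=rid, tool_outputs=outputs
+ # ) as stream:
+ # await stream.until_done()
+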
+ async def _handle_submit_tool_outputs(
+ self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None
+ ) -> None:
+ if isinstance(run.required_action, _models.SubmitToolOutputsAction):
+ tool_calls = run.required_action.submit_tool_outputs.tool_calls
+ if not tool_calls:
+ logger.debug("No tool calls to execute.")
+ return
+
+ toolset = self.get_toolset()
+ if toolset:
+ tool_outputs = await toolset.execute_tool_calls(tool_calls)
+ else:
+ logger.warning("Toolset is not available in the client.")
+ return
+
+ logger.info("Tool outputs: %s", tool_outputs)
+ if tool_outputs:
+ async with await self.submit_tool_outputs_to_stream(
+ thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
+ ) as stream:
+ await stream.until_done()
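+
+ # ``_handle_submit_tool_outputs`` above is the callback handed to
+ # AsyncAgentRunStream: when a streamed run reaches ``requires_action`` it
+ # executes the registered toolset's function calls and feeds the outputs back
+ # through ``submit_tool_outputs_to_stream``, so function tools keep working
+ # mid-stream without caller intervention.
+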
+ @overload
+ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param body: Required.
+ :type body: JSON
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def upload_file(
+ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :keyword file: Required.
+ :paramtype file: ~azure.ai.projects._vendor.FileType
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+ :keyword filename: Default value is None.
+ :paramtype filename: str
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def upload_file(
+ self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param file_path: Required.
+ :type file_path: str
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required.
+ :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def upload_file(
+ self,
+ body: Optional[JSON] = None,
+ *,
+ file: Optional[FileType] = None,
+ file_path: Optional[str] = None,
+ purpose: Union[str, _models.FilePurpose, None] = None,
+ filename: Optional[str] = None,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """
+ Uploads a file for use by other operations, delegating to the generated operations.
+
+ :param body: JSON. Required if `file` and `purpose` are not provided.
+ :keyword file: File content. Required if `body` and `purpose` are not provided.
+ :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided.
+ :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+ "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided.
+ :keyword filename: The name of the file.
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :raises FileNotFoundError: If the file_path is invalid.
+ :raises IOError: If there are issues with reading the file.
+ :raises ~azure.core.exceptions.HttpResponseError: For errors returned by the service.
+ """
+ if body is not None:
+ return await super().upload_file(body=body, **kwargs)
+
+ if isinstance(purpose, FilePurpose):
+ purpose = purpose.value
+
+ if file is not None and purpose is not None:
+ return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+
+ if file_path is not None and purpose is not None:
+ if not os.path.isfile(file_path):
+ raise FileNotFoundError(f"The file path provided does not exist: {file_path}")
+
+ try:
+ with open(file_path, "rb") as f:
+ content = f.read()
+
+ # Determine filename and create correct FileType
+ base_filename = filename or os.path.basename(file_path)
+ file_content: FileType = (base_filename, content)
+
+ return await super().upload_file(file=file_content, purpose=purpose, **kwargs)
+ except IOError as e:
+ # Chain the original exception so the root cause is preserved
+ raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") from e
+
+ raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")
+
+ @overload
+ async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.OpenAIFile
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def upload_file_and_poll(
+ self,
+ *,
+ file: FileType,
+ purpose: Union[str, _models.FilePurpose],
+ filename: Optional[str] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.OpenAIFile:
+ """Uploads a file for use by other operations.
+
+ :keyword file: Required.
+ :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: Default value is None. + :paramtype filename: str + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def upload_file_and_poll( + self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param file_path: Required. + :type file_path: str + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def upload_file_and_poll( + self, + body: Optional[JSON] = None, + *, + file: Optional[FileType] = None, + file_path: Optional[str] = None, + purpose: Union[str, _models.FilePurpose, None] = None, + filename: Optional[str] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.OpenAIFile: + """ + Uploads a file for use by other operations, delegating to the generated operations. + + :param body: JSON. Required if `file` and `purpose` are not provided. + :param file: File content. Required if `body` and `purpose` are not provided. + :param file_path: Path to the file. Required if `body` and `purpose` are not provided. + :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :param filename: The name of the file. + :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value + is 1. + :paramtype sleep_interval: float + :param kwargs: Additional parameters. + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :raises FileNotFoundError: If the file_path is invalid. + :raises IOError: If there are issues with reading the file. + :raises: HttpResponseError for HTTP errors. + """ + if body is not None: + uploaded_file = await self.upload_file(body=body, **kwargs) + elif file is not None and purpose is not None: + uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) + elif file_path is not None and purpose is not None: + uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) + else: + raise ValueError( + "Invalid parameters for upload_file_and_poll. Please provide either 'body', " + "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
+ )
+
+ while uploaded_file.status in ["uploaded", "pending", "running"]:
+ # Do not block the event loop between polls (assumes ``import asyncio`` at module scope)
+ await asyncio.sleep(sleep_interval)
+ uploaded_file = await self.get_file(uploaded_file.id)
+
+ return uploaded_file
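+
+ # Usage sketch (illustration only; ``project_client`` is a hypothetical name,
+ # and the referenced file path is an example):
+ #
+ # uploaded = await project_client.agents.upload_file_and_poll(
+ # file_path="./data/report.md", purpose=FilePurpose.AGENTS, sleep_interval=2
+ # )
+ # print(uploaded.id, uploaded.status)
+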
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls.
+
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls.
+
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ async def create_vector_store_and_poll(
+ self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls.
+
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def create_vector_store_and_poll(
+ self,
+ body: Union[JSON, IO[bytes], None] = None,
+ *,
+ content_type: str = "application/json",
+ file_ids: Optional[List[str]] = None,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStore:
+ """Creates a vector store and polls.
+
+ :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+ :type body: JSON or IO[bytes]
+ :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+ ``file_search`` that can access files. Default value is None.
+ :paramtype file_ids: list[str]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+ :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+ use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is not None:
+ vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs)
+ elif file_ids is not None or (name is not None and expires_after is not None):
+ vector_store = await self.create_vector_store(
+ content_type=content_type,
+ file_ids=file_ids,
+ name=name,
+ expires_after=expires_after,
+ chunking_strategy=chunking_strategy,
+ metadata=metadata,
+ **kwargs,
+ )
+ else:
+ raise ValueError(
+ "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
+ "'file_ids', or 'name' and 'expires_after'."
+ )
+
+ while vector_store.status == "in_progress":
+ # Yield to the event loop while the service processes the store
+ await asyncio.sleep(sleep_interval)
+ vector_store = await self.get_vector_store(vector_store.id)
+
+ return vector_store
+
+ @overload
+ async def create_vector_store_file_batch_and_poll(
+ self,
+ vector_store_id: str,
+ body: JSON,
+ *,
+ content_type: str = "application/json",
+ sleep_interval: float = 1,
+ **kwargs: Any,
+ ) -> _models.VectorStoreFileBatch:
+ """Create a vector store file batch and poll.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type.
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_vector_store_file_batch_and_poll( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = None, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch and poll. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). 
If not set, will
+ use the auto strategy. Default value is None.
+ :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+ :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+ is 1.
+ :paramtype sleep_interval: float
+ :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ if body is None:
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
+ )
+ else:
+ content_type = kwargs.get("content_type", "application/json")
+ # Pass the required vector_store_id through on the body path as well
+ vector_store_file_batch = await super().create_vector_store_file_batch(
+ vector_store_id=vector_store_id, body=body, content_type=content_type, **kwargs
+ )
+
+ while vector_store_file_batch.status == "in_progress":
+ # Do not block the event loop between polls (assumes ``import asyncio`` at module scope)
+ await asyncio.sleep(sleep_interval)
+ vector_store_file_batch = await super().get_vector_store_file_batch(
+ vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
+ )
+
+ return vector_store_file_batch
+
+
+__all__: List[str] = [
+ "AgentsOperations",
+ "ConnectionsOperations",
+ "InferenceOperations",
+] # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+ """Do not remove from this file.
+
+ `patch_sdk` is a last resort escape hatch that allows you to do customizations
+ you can't accomplish using the techniques described in
+ https://aka.ms/azsdk/python/dpcodegen/python/customize
+ """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
new file mode 100644
index 000000000000..f6ed04e4637b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
@@ -0,0 +1,376 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + + +from ._models import ( # type: ignore + Agent, + AgentDeletionStatus, + AgentThread, + AgentThreadCreationOptions, + AgentsApiResponseFormat, + AgentsNamedToolChoice, + ApplicationInsightsConfiguration, + AzureAISearchResource, + AzureAISearchToolDefinition, + BingGroundingToolDefinition, + CodeInterpreterToolDefinition, + CodeInterpreterToolResource, + ConnectionListResource, + ConnectionResource, + CronTrigger, + Dataset, + Evaluation, + EvaluationSchedule, + EvaluatorConfiguration, + FileContentResponse, + FileDeletionStatus, + FileListResponse, + FileSearchToolDefinition, + FileSearchToolDefinitionDetails, + FileSearchToolResource, + FunctionDefinition, + FunctionName, + FunctionToolDefinition, + IndexResource, + InputData, + MessageAttachment, + MessageContent, + MessageDelta, + MessageDeltaChunk, + MessageDeltaContent, + MessageDeltaImageFileContent, + MessageDeltaImageFileContentObject, + MessageDeltaTextAnnotation, + MessageDeltaTextContent, + MessageDeltaTextContentObject, + MessageDeltaTextFileCitationAnnotation, + MessageDeltaTextFileCitationAnnotationObject, + MessageDeltaTextFilePathAnnotation, + MessageDeltaTextFilePathAnnotationObject, + MessageImageFileContent, + MessageImageFileDetails, + MessageIncompleteDetails, + MessageTextAnnotation, + MessageTextContent, + MessageTextDetails, + MessageTextFileCitationAnnotation, + MessageTextFileCitationDetails, + MessageTextFilePathAnnotation, + MessageTextFilePathDetails, + MicrosoftFabricToolDefinition, + OpenAIFile, + OpenAIPageableListOfAgent, + OpenAIPageableListOfRunStep, + OpenAIPageableListOfThreadMessage, + OpenAIPageableListOfThreadRun, + OpenAIPageableListOfVectorStore, + OpenAIPageableListOfVectorStoreFile, + RecurrenceSchedule, + RecurrenceTrigger, + RequiredAction, + RequiredFunctionToolCall, + RequiredFunctionToolCallDetails, + RequiredToolCall, + RunCompletionUsage, + RunError, + RunStep, + RunStepAzureAISearchToolCall, + RunStepBingGroundingToolCall, + RunStepCodeInterpreterImageOutput, + RunStepCodeInterpreterImageReference, + RunStepCodeInterpreterLogOutput, + RunStepCodeInterpreterToolCall, + RunStepCodeInterpreterToolCallDetails, + RunStepCodeInterpreterToolCallOutput, + RunStepCompletionUsage, + RunStepDelta, + RunStepDeltaChunk, + RunStepDeltaCodeInterpreterDetailItemObject, + RunStepDeltaCodeInterpreterImageOutput, + RunStepDeltaCodeInterpreterImageOutputObject, + RunStepDeltaCodeInterpreterLogOutput, + RunStepDeltaCodeInterpreterOutput, + RunStepDeltaCodeInterpreterToolCall, + RunStepDeltaDetail, + RunStepDeltaFileSearchToolCall, + RunStepDeltaFunction, + RunStepDeltaFunctionToolCall, + RunStepDeltaMessageCreation, + RunStepDeltaMessageCreationObject, + RunStepDeltaToolCall, + RunStepDeltaToolCallObject, + RunStepDetails, + RunStepError, + RunStepFileSearchToolCall, + RunStepFunctionToolCall, + RunStepFunctionToolCallDetails, + RunStepMessageCreationDetails, + RunStepMessageCreationReference, + RunStepMicrosoftFabricToolCall, + RunStepSharepointToolCall, + RunStepToolCall, + RunStepToolCallDetails, + SamplingStrategy, + SharepointToolDefinition, + SubmitToolOutputsAction, + SubmitToolOutputsDetails, + SystemData, + ThreadDeletionStatus, + ThreadMessage, + ThreadMessageOptions, + ThreadRun, + ToolDefinition, + ToolOutput, + ToolResources, + Trigger, + 
TruncationObject, + UpdateCodeInterpreterToolResourceOptions, + UpdateFileSearchToolResourceOptions, + UpdateToolResourcesOptions, + VectorStore, + VectorStoreAutoChunkingStrategyRequest, + VectorStoreAutoChunkingStrategyResponse, + VectorStoreChunkingStrategyRequest, + VectorStoreChunkingStrategyResponse, + VectorStoreDeletionStatus, + VectorStoreExpirationPolicy, + VectorStoreFile, + VectorStoreFileBatch, + VectorStoreFileCount, + VectorStoreFileDeletionStatus, + VectorStoreFileError, + VectorStoreStaticChunkingStrategyOptions, + VectorStoreStaticChunkingStrategyRequest, + VectorStoreStaticChunkingStrategyResponse, +) + +from ._enums import ( # type: ignore + AgentStreamEvent, + AgentsApiResponseFormatMode, + AgentsApiToolChoiceOptionMode, + AgentsNamedToolChoiceType, + ApiResponseFormat, + AuthenticationType, + ConnectionType, + DoneEvent, + ErrorEvent, + FilePurpose, + FileState, + Frequency, + IncompleteRunDetails, + ListSortOrder, + MessageIncompleteDetailsReason, + MessageRole, + MessageStatus, + MessageStreamEvent, + RunStatus, + RunStepErrorCode, + RunStepStatus, + RunStepStreamEvent, + RunStepType, + RunStreamEvent, + ThreadStreamEvent, + TruncationStrategy, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, + VectorStoreExpirationPolicyAnchor, + VectorStoreFileBatchStatus, + VectorStoreFileErrorCode, + VectorStoreFileStatus, + VectorStoreFileStatusFilter, + VectorStoreStatus, + WeekDays, +) +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "Agent", + "AgentDeletionStatus", + "AgentThread", + "AgentThreadCreationOptions", + "AgentsApiResponseFormat", + "AgentsNamedToolChoice", + "ApplicationInsightsConfiguration", + "AzureAISearchResource", + "AzureAISearchToolDefinition", + "BingGroundingToolDefinition", + "CodeInterpreterToolDefinition", + "CodeInterpreterToolResource", + "ConnectionListResource", + "ConnectionResource", + "CronTrigger", + "Dataset", + "Evaluation", + "EvaluationSchedule", + "EvaluatorConfiguration", + "FileContentResponse", + "FileDeletionStatus", + "FileListResponse", + "FileSearchToolDefinition", + "FileSearchToolDefinitionDetails", + "FileSearchToolResource", + "FunctionDefinition", + "FunctionName", + "FunctionToolDefinition", + "IndexResource", + "InputData", + "MessageAttachment", + "MessageContent", + "MessageDelta", + "MessageDeltaChunk", + "MessageDeltaContent", + "MessageDeltaImageFileContent", + "MessageDeltaImageFileContentObject", + "MessageDeltaTextAnnotation", + "MessageDeltaTextContent", + "MessageDeltaTextContentObject", + "MessageDeltaTextFileCitationAnnotation", + "MessageDeltaTextFileCitationAnnotationObject", + "MessageDeltaTextFilePathAnnotation", + "MessageDeltaTextFilePathAnnotationObject", + "MessageImageFileContent", + "MessageImageFileDetails", + "MessageIncompleteDetails", + "MessageTextAnnotation", + "MessageTextContent", + "MessageTextDetails", + "MessageTextFileCitationAnnotation", + "MessageTextFileCitationDetails", + "MessageTextFilePathAnnotation", + "MessageTextFilePathDetails", + "MicrosoftFabricToolDefinition", + "OpenAIFile", + "OpenAIPageableListOfAgent", + "OpenAIPageableListOfRunStep", + "OpenAIPageableListOfThreadMessage", + "OpenAIPageableListOfThreadRun", + "OpenAIPageableListOfVectorStore", + "OpenAIPageableListOfVectorStoreFile", + "RecurrenceSchedule", + "RecurrenceTrigger", + "RequiredAction", + "RequiredFunctionToolCall", + "RequiredFunctionToolCallDetails", + "RequiredToolCall", + 
"RunCompletionUsage", + "RunError", + "RunStep", + "RunStepAzureAISearchToolCall", + "RunStepBingGroundingToolCall", + "RunStepCodeInterpreterImageOutput", + "RunStepCodeInterpreterImageReference", + "RunStepCodeInterpreterLogOutput", + "RunStepCodeInterpreterToolCall", + "RunStepCodeInterpreterToolCallDetails", + "RunStepCodeInterpreterToolCallOutput", + "RunStepCompletionUsage", + "RunStepDelta", + "RunStepDeltaChunk", + "RunStepDeltaCodeInterpreterDetailItemObject", + "RunStepDeltaCodeInterpreterImageOutput", + "RunStepDeltaCodeInterpreterImageOutputObject", + "RunStepDeltaCodeInterpreterLogOutput", + "RunStepDeltaCodeInterpreterOutput", + "RunStepDeltaCodeInterpreterToolCall", + "RunStepDeltaDetail", + "RunStepDeltaFileSearchToolCall", + "RunStepDeltaFunction", + "RunStepDeltaFunctionToolCall", + "RunStepDeltaMessageCreation", + "RunStepDeltaMessageCreationObject", + "RunStepDeltaToolCall", + "RunStepDeltaToolCallObject", + "RunStepDetails", + "RunStepError", + "RunStepFileSearchToolCall", + "RunStepFunctionToolCall", + "RunStepFunctionToolCallDetails", + "RunStepMessageCreationDetails", + "RunStepMessageCreationReference", + "RunStepMicrosoftFabricToolCall", + "RunStepSharepointToolCall", + "RunStepToolCall", + "RunStepToolCallDetails", + "SamplingStrategy", + "SharepointToolDefinition", + "SubmitToolOutputsAction", + "SubmitToolOutputsDetails", + "SystemData", + "ThreadDeletionStatus", + "ThreadMessage", + "ThreadMessageOptions", + "ThreadRun", + "ToolDefinition", + "ToolOutput", + "ToolResources", + "Trigger", + "TruncationObject", + "UpdateCodeInterpreterToolResourceOptions", + "UpdateFileSearchToolResourceOptions", + "UpdateToolResourcesOptions", + "VectorStore", + "VectorStoreAutoChunkingStrategyRequest", + "VectorStoreAutoChunkingStrategyResponse", + "VectorStoreChunkingStrategyRequest", + "VectorStoreChunkingStrategyResponse", + "VectorStoreDeletionStatus", + "VectorStoreExpirationPolicy", + "VectorStoreFile", + "VectorStoreFileBatch", + "VectorStoreFileCount", + "VectorStoreFileDeletionStatus", + "VectorStoreFileError", + "VectorStoreStaticChunkingStrategyOptions", + "VectorStoreStaticChunkingStrategyRequest", + "VectorStoreStaticChunkingStrategyResponse", + "AgentStreamEvent", + "AgentsApiResponseFormatMode", + "AgentsApiToolChoiceOptionMode", + "AgentsNamedToolChoiceType", + "ApiResponseFormat", + "AuthenticationType", + "ConnectionType", + "DoneEvent", + "ErrorEvent", + "FilePurpose", + "FileState", + "Frequency", + "IncompleteRunDetails", + "ListSortOrder", + "MessageIncompleteDetailsReason", + "MessageRole", + "MessageStatus", + "MessageStreamEvent", + "RunStatus", + "RunStepErrorCode", + "RunStepStatus", + "RunStepStreamEvent", + "RunStepType", + "RunStreamEvent", + "ThreadStreamEvent", + "TruncationStrategy", + "VectorStoreChunkingStrategyRequestType", + "VectorStoreChunkingStrategyResponseType", + "VectorStoreExpirationPolicyAnchor", + "VectorStoreFileBatchStatus", + "VectorStoreFileErrorCode", + "VectorStoreFileStatus", + "VectorStoreFileStatusFilter", + "VectorStoreStatus", + "WeekDays", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py new file mode 100644 index 000000000000..7ca731b7639b --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -0,0 +1,513 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Represents the mode in which the model will handle the return format of a tool call.""" + + AUTO = "auto" + """Default value. Let the model handle the return format.""" + NONE = "none" + """Setting the value to ``none``\\ , will result in a 400 Bad request.""" + + +class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies how the tool choice will be used.""" + + NONE = "none" + """The model will not call a function and instead generates a message.""" + AUTO = "auto" + """The model can pick between generating a message or calling a function.""" + + +class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Available tool types for agents named tools.""" + + FUNCTION = "function" + """Tool type ``function``""" + CODE_INTERPRETER = "code_interpreter" + """Tool type ``code_interpreter``""" + FILE_SEARCH = "file_search" + """Tool type ``file_search``""" + BING_GROUNDING = "bing_grounding" + """Tool type ``bing_grounding``""" + MICROSOFT_FABRIC = "microsoft_fabric" + """Tool type ``microsoft_fabric``""" + SHAREPOINT = "sharepoint" + """Tool type ``sharepoint``""" + AZURE_AI_SEARCH = "azure_ai_search" + """Tool type ``azure_ai_search``""" + + +class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Each event in a server-sent events stream has an ``event`` and ``data`` property: + + .. code-block:: + + event: thread.created + data: {"id": "thread_123", "object": "thread", ...} + + We emit events whenever a new object is created, transitions to a new state, or is being + streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run + is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses + to create a message during a run, we emit a ``thread.message.created event``\\ , a + ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a + ``thread.message.completed`` event. + + We may add additional events over time, so we recommend handling unknown events gracefully + in your code. + """ + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. 
The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent when a message ends as incomplete.
The data of this event is of type ThreadMessage""" + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + DONE = "done" + """Event sent when the stream is done.""" + + +class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible API response formats.""" + + TEXT = "text" + """``text`` format should be used for requests involving any sort of ToolCall.""" + JSON_OBJECT = "json_object" + """Using ``json_object`` format will limit the usage of ToolCall to only functions.""" + + +class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Authentication type used by Azure AI service to connect to another service.""" + + API_KEY = "ApiKey" + """API Key authentication""" + AAD = "AAD" + """Entra ID authentication""" + SAS = "SAS" + """Shared Access Signature (SAS) authentication""" + + +class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Type (or category) of the connection.""" + + AZURE_OPEN_AI = "AzureOpenAI" + """Azure OpenAI service""" + SERVERLESS = "Serverless" + """Serverless API service""" + AZURE_BLOB_STORAGE = "AzureBlob" + """Azure Blob Storage""" + AI_SERVICES = "AIServices" + """Azure AI Services""" + + +class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating the successful end of a stream.""" + + DONE = "done" + """Event sent when the stream is done.""" + + +class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Terminal event indicating a server side error while streaming.""" + + ERROR = "error" + """Event sent when an error occurs, such as an internal server error or a timeout.""" + + +class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values denoting the intended usage of a file.""" + + FINE_TUNE = "fine-tune" + """Indicates a file is used for fine tuning input.""" + FINE_TUNE_RESULTS = "fine-tune-results" + """Indicates a file is used for fine tuning results.""" + AGENTS = "assistants" + """Indicates a file is used as input to agents.""" + AGENTS_OUTPUT = "assistants_output" + """Indicates a file is used as output by agents.""" + BATCH = "batch" + """Indicates a file is used as input to a batch operation.""" + BATCH_OUTPUT = "batch_output" + """Indicates a file is used as output by a vector store batch operation.""" + VISION = "vision" + """Indicates a file is used as input to a vision operation.""" + + +class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The state of the file.""" + + UPLOADED = "uploaded" + """The file has been uploaded but it's not yet processed. This state is not returned by Azure + OpenAI and exposed only for + compatibility. It can be categorized as an inactive state.""" + PENDING = "pending" + """The operation was created and is queued to be processed in the future. It can be + categorized as an inactive state.""" + RUNNING = "running" + """The operation has started to be processed. It can be categorized as an active state.""" + PROCESSED = "processed" + """The operation has been successfully processed and is ready for consumption. It can be categorized as + a terminal state.""" + ERROR = "error" + """The operation has completed processing with a failure and cannot be further consumed. It can be + categorized as a terminal state.""" + DELETING = "deleting" + """The entity is in the process of being deleted. This state is not returned by Azure OpenAI and + exposed only for compatibility.
+ It can be categorized as an active state.""" + DELETED = "deleted" + """The entity has been deleted but may still be referenced by other entities predating the + deletion. It can be categorized as a + terminal state.""" + + +class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Frequency of the schedule - day, week, month, hour, minute.""" + + MONTH = "Month" + WEEK = "Week" + DAY = "Day" + HOUR = "Hour" + MINUTE = "Minute" + + +class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The reason why the run is incomplete. This will point to which specific token limit was reached + over the course of the run. + """ + + MAX_COMPLETION_TOKENS = "max_completion_tokens" + """Maximum completion tokens exceeded""" + MAX_PROMPT_TOKENS = "max_prompt_tokens" + """Maximum prompt tokens exceeded""" + + +class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The available sorting options when requesting a list of response objects.""" + + ASCENDING = "asc" + """Specifies an ascending sort order.""" + DESCENDING = "desc" + """Specifies a descending sort order.""" + + +class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A set of reasons describing why a message is marked as incomplete.""" + + CONTENT_FILTER = "content_filter" + """The run generating the message was terminated due to content filter flagging.""" + MAX_TOKENS = "max_tokens" + """The run generating the message exhausted available tokens before completion.""" + RUN_CANCELLED = "run_cancelled" + """The run generating the message was cancelled before completion.""" + RUN_FAILED = "run_failed" + """The run generating the message failed.""" + RUN_EXPIRED = "run_expired" + """The run generating the message expired.""" + + +class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible values for roles attributed to messages in a thread.""" + + USER = "user" + """The role representing the end-user.""" + AGENT = "assistant" + """The role representing the agent.""" + + +class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible execution status values for a thread message.""" + + IN_PROGRESS = "in_progress" + """A run is currently creating this message.""" + INCOMPLETE = "incomplete" + """This message is incomplete. See incomplete_details for more information.""" + COMPLETED = "completed" + """This message was successfully completed by a run.""" + + +class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Message operation related streaming events.""" + + THREAD_MESSAGE_CREATED = "thread.message.created" + """Event sent when a new message is created. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" + """Event sent when a message moves to ``in_progress`` status. The data of this event is of type + ThreadMessage""" + THREAD_MESSAGE_DELTA = "thread.message.delta" + """Event sent when a message is being streamed. The data of this event is of type + MessageDeltaChunk""" + THREAD_MESSAGE_COMPLETED = "thread.message.completed" + """Event sent when a message is completed. The data of this event is of type ThreadMessage""" + THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" + """Event sent when a message ends as incomplete.
The data of this event is of type ThreadMessage""" + + +class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of an agent thread run.""" + + QUEUED = "queued" + """Represents a run that is queued to start.""" + IN_PROGRESS = "in_progress" + """Represents a run that is in progress.""" + REQUIRES_ACTION = "requires_action" + """Represents a run that needs another operation, such as tool output submission, to continue.""" + CANCELLING = "cancelling" + """Represents a run that is in the process of cancellation.""" + CANCELLED = "cancelled" + """Represents a run that has been cancelled.""" + FAILED = "failed" + """Represents a run that failed.""" + COMPLETED = "completed" + """Represents a run that successfully completed.""" + EXPIRED = "expired" + """Represents a run that expired before it could otherwise finish.""" + + +class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible error code values attributable to a failed run step.""" + + SERVER_ERROR = "server_error" + """Represents a server error.""" + RATE_LIMIT_EXCEEDED = "rate_limit_exceeded" + """Represents an error indicating configured rate limits were exceeded.""" + + +class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible values for the status of a run step.""" + + IN_PROGRESS = "in_progress" + """Represents a run step still in progress.""" + CANCELLED = "cancelled" + """Represents a run step that was cancelled.""" + FAILED = "failed" + """Represents a run step that failed.""" + COMPLETED = "completed" + """Represents a run step that successfully completed.""" + EXPIRED = "expired" + """Represents a run step that expired before otherwise finishing.""" + + +class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run step operation related streaming events.""" + + THREAD_RUN_STEP_CREATED = "thread.run.step.created" + """Event sent when a new thread run step is created. The data of this event is of type RunStep""" + THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" + """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type + RunStep""" + THREAD_RUN_STEP_DELTA = "thread.run.step.delta" + """Event sent when a run step is being streamed. The data of this event is of type + RunStepDeltaChunk""" + THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" + """Event sent when a run step is completed. The data of this event is of type RunStep""" + THREAD_RUN_STEP_FAILED = "thread.run.step.failed" + """Event sent when a run step fails. The data of this event is of type RunStep""" + THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" + """Event sent when a run step is cancelled. The data of this event is of type RunStep""" + THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" + """Event sent when a run step is expired. The data of this event is of type RunStep""" + + +class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The possible types of run steps.""" + + MESSAGE_CREATION = "message_creation" + """Represents a run step to create a message.""" + TOOL_CALLS = "tool_calls" + """Represents a run step that calls tools.""" + + +class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Run operation related streaming events.""" + + THREAD_RUN_CREATED = "thread.run.created" + """Event sent when a new run is created. 
The data of this event is of type ThreadRun""" + THREAD_RUN_QUEUED = "thread.run.queued" + """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" + THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" + """Event sent when a run moves to ``in_progress`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" + """Event sent when a run moves to ``requires_action`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_COMPLETED = "thread.run.completed" + """Event sent when a run is completed. The data of this event is of type ThreadRun""" + THREAD_RUN_FAILED = "thread.run.failed" + """Event sent when a run fails. The data of this event is of type ThreadRun""" + THREAD_RUN_CANCELLING = "thread.run.cancelling" + """Event sent when a run moves to ``cancelling`` status. The data of this event is of type + ThreadRun""" + THREAD_RUN_CANCELLED = "thread.run.cancelled" + """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" + THREAD_RUN_EXPIRED = "thread.run.expired" + """Event sent when a run is expired. The data of this event is of type ThreadRun""" + + +class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Thread operation related streaming events.""" + + THREAD_CREATED = "thread.created" + """Event sent when a new thread is created. The data of this event is of type AgentThread""" + + +class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Possible truncation strategies for the thread.""" + + AUTO = "auto" + """Default value. Messages in the middle of the thread will be dropped to fit the context length + of the model.""" + LAST_MESSAGES = "last_messages" + """The thread will be truncated to the ``lastMessages`` count of recent messages.""" + + +class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of chunking strategy.""" + + AUTO = "auto" + STATIC = "static" + + +class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of chunking strategy.""" + + OTHER = "other" + STATIC = "static" + + +class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Describes the relationship between the days and the expiration of this vector store.""" + + LAST_ACTIVE_AT = "last_active_at" + """The expiration policy is based on the last time the vector store was active.""" + + +class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The status of the vector store file batch.""" + + IN_PROGRESS = "in_progress" + """The vector store is still processing this file batch.""" + COMPLETED = "completed" + """The vector store file batch is ready for use.""" + CANCELLED = "cancelled" + """The vector store file batch was cancelled.""" + FAILED = "failed" + """The vector store file batch failed to process.""" + + +class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Error code variants for vector store file processing.""" + + INTERNAL_ERROR = "internal_error" + """An internal error occurred.""" + FILE_NOT_FOUND = "file_not_found" + """The file was not found.""" + PARSING_ERROR = "parsing_error" + """The file could not be parsed.""" + UNHANDLED_MIME_TYPE = "unhandled_mime_type" + """The file has an unhandled mime type.""" + + +class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Vector store file status.""" + + IN_PROGRESS =
"in_progress" + """The file is currently being processed.""" + COMPLETED = "completed" + """The file has been successfully processed.""" + FAILED = "failed" + """The file has failed to process.""" + CANCELLED = "cancelled" + """The file was cancelled.""" + + +class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Query parameter filter for vector store file retrieval endpoint.""" + + IN_PROGRESS = "in_progress" + """Retrieve only files that are currently being processed""" + COMPLETED = "completed" + """Retrieve only files that have been successfully processed""" + FAILED = "failed" + """Retrieve only files that have failed to process""" + CANCELLED = "cancelled" + """Retrieve only files that were cancelled""" + + +class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Vector store possible status.""" + + EXPIRED = "expired" + """expired status indicates that this vector store has expired and is no longer available for use.""" + IN_PROGRESS = "in_progress" + """in_progress status indicates that this vector store is still processing files.""" + COMPLETED = "completed" + """completed status indicates that this vector store is ready for use.""" + + +class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """WeekDay of the schedule - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.""" + + MONDAY = "Monday" + TUESDAY = "Tuesday" + WEDNESDAY = "Wednesday" + THURSDAY = "Thursday" + FRIDAY = "Friday" + SATURDAY = "Saturday" + SUNDAY = "Sunday" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py new file mode 100644 index 000000000000..27241a07547e --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -0,0 +1,6106 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .. import _model_base +from .._model_base import rest_discriminator, rest_field +from ._enums import ( + AuthenticationType, + RunStepType, + VectorStoreChunkingStrategyRequestType, + VectorStoreChunkingStrategyResponseType, +) + +if TYPE_CHECKING: + from .. import _types, models as _models + + +class Agent(_model_base.Model): + """Represents an agent that can call the model and use tools. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always assistant. Required. Default value is + "assistant". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar name: The name of the agent. Required. + :vartype name: str + :ivar description: The description of the agent. Required. 
+ :vartype description: str + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The system instructions for the agent to use. Required. + :vartype instructions: str + :ivar tools: The collection of tools enabled for the agent. Required. + :vartype tools: list[~azure.ai.projects.models.ToolDefinition] + :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are + specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required. + :vartype tool_resources: ~azure.ai.projects.models.ToolResources + :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Required. + :vartype temperature: float + :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required. + :vartype top_p: float + :ivar response_format: The response format of the tool calls used by this agent. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat + :vartype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["assistant"] = rest_field() + """The object type, which is always assistant. Required. Default value is \"assistant\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + name: str = rest_field() + """The name of the agent. Required.""" + description: str = rest_field() + """The description of the agent. Required.""" + model: str = rest_field() + """The ID of the model to use. Required.""" + instructions: str = rest_field() + """The system instructions for the agent to use. Required.""" + tools: List["_models.ToolDefinition"] = rest_field() + """The collection of tools enabled for the agent. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are used by the agent's tools. The resources are specific to the type + of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Required.""" + temperature: float = rest_field() + """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output + more random, + while lower values like 0.2 will make it more focused and deterministic.
Required.""" + top_p: float = rest_field() + """An alternative to sampling with temperature, called nucleus sampling, where the model considers + the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Required.""" + response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() + """The response format of the tool calls used by this agent. Is one of the following types: str, + Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + description: str, + model: str, + instructions: str, + tools: List["_models.ToolDefinition"], + tool_resources: "_models.ToolResources", + temperature: float, + top_p: float, + metadata: Dict[str, str], + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant"] = "assistant" + + +class AgentDeletionStatus(_model_base.Model): + """The status of an agent deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is + "assistant.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["assistant.deleted"] = rest_field() + """The object type, which is always 'assistant.deleted'. Required. Default value is + \"assistant.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["assistant.deleted"] = "assistant.deleted" + + +class AgentsApiResponseFormat(_model_base.Model): + """An object describing the expected output of the model. If ``json_object``, only ``function`` + type ``tools`` are allowed to be passed to the Run. + If ``text``, the model can return text or any value needed. + + :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and + "json_object".
+ :vartype type: str or ~azure.ai.projects.models.ApiResponseFormat + """ + + type: Optional[Union[str, "_models.ApiResponseFormat"]] = rest_field() + """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\".""" + + @overload + def __init__( + self, + *, + type: Optional[Union[str, "_models.ApiResponseFormat"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentsNamedToolChoice(_model_base.Model): + """Specifies a tool the model should use. Use to force the model to call a specific tool. + + + :ivar type: The type of tool. If type is ``function``\\ , the function name must be set. + Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", + "microsoft_fabric", "sharepoint", and "azure_ai_search". + :vartype type: str or ~azure.ai.projects.models.AgentsNamedToolChoiceType + :ivar function: The name of the function to call. + :vartype function: ~azure.ai.projects.models.FunctionName + """ + + type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() + """The type of tool. If type is ``function``\ , the function name must be set. Required. Known + values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", + \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\".""" + function: Optional["_models.FunctionName"] = rest_field() + """The name of the function to call.""" + + @overload + def __init__( + self, + *, + type: Union[str, "_models.AgentsNamedToolChoiceType"], + function: Optional["_models.FunctionName"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AgentThread(_model_base.Model): + """Information about a single thread associated with an agent. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread'. Required. Default value is "thread". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required. + :vartype tool_resources: ~azure.ai.projects.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints.
Required.""" + object: Literal["thread"] = rest_field() + """The object type, which is always 'thread'. Required. Default value is \"thread\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + tool_resources: "_models.ToolResources" = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the type + of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires a list + of vector store IDs. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + tool_resources: "_models.ToolResources", + metadata: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread"] = "thread" + + +class AgentThreadCreationOptions(_model_base.Model): + """The details used to create a new agent thread. + + :ivar messages: The initial messages to associate with the new thread. + :vartype messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :ivar tool_resources: A set of resources that are made available to the agent's tools in this + thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. + :vartype tool_resources: ~azure.ai.projects.models.ToolResources + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() + """The initial messages to associate with the new thread.""" + tool_resources: Optional["_models.ToolResources"] = rest_field() + """A set of resources that are made available to the agent's tools in this thread. The resources + are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the + ``file_search`` tool requires + a list of vector store IDs.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + messages: Optional[List["_models.ThreadMessageOptions"]] = None, + tool_resources: Optional["_models.ToolResources"] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class InputData(_model_base.Model): + """Abstract data class for input data configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ApplicationInsightsConfiguration, Dataset + + + :ivar type: Type of the data. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """Type of the data. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): + """Data Source for Application Insights. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "app_insights". + :vartype type: str + :ivar resource_id: LogAnalytic Workspace resourceID associated with ApplicationInsights. + Required. + :vartype resource_id: str + :ivar query: Query to fetch the data. Required. + :vartype query: str + :ivar service_name: Service name. Required. + :vartype service_name: str + :ivar connection_string: Connection String to connect to ApplicationInsights. + :vartype connection_string: str + """ + + type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"app_insights\".""" + resource_id: str = rest_field(name="resourceId") + """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" + query: str = rest_field() + """Query to fetch the data. Required.""" + service_name: str = rest_field(name="serviceName") + """Service name. Required.""" + connection_string: Optional[str] = rest_field(name="connectionString") + """Connection String to connect to ApplicationInsights.""" + + @overload + def __init__( + self, + *, + resource_id: str, + query: str, + service_name: str, + connection_string: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="app_insights", **kwargs) + + +class AzureAISearchResource(_model_base.Model): + """A set of index resources used by the ``azure_ai_search`` tool. + + :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent. + :vartype index_list: list[~azure.ai.projects.models.IndexResource] + """ + + index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") + """The indices attached to this agent. There can be a maximum of 1 index + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + index_list: Optional[List["_models.IndexResource"]] = None, + ) -> None: ... 
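+
+ # Editorial note, not generated code: a brief, illustrative sketch of the
+ # discriminator pattern used by the models above. Subclasses of the abstract
+ # InputData fix their own ``type`` value in __init__, so callers never pass it.
+ # The resource_id/query/service_name values below are invented placeholders:
+ #
+ #     config = ApplicationInsightsConfiguration(
+ #         resource_id="<log-analytics-workspace-resource-id>",
+ #         query="traces | take 10",
+ #         service_name="my-service",
+ #     )
+ #     assert config.type == "app_insights"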
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolDefinition(_model_base.Model): + """An abstract representation of an input tool definition that an agent can use. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AzureAISearchToolDefinition, BingGroundingToolDefinition, CodeInterpreterToolDefinition, + FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition, + SharepointToolDefinition + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): + """The input definition information for an Azure AI search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): + """The input definition information for a bing grounding search tool as used to configure an + agent. + + + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". + :vartype type: str + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): + """The input definition information for a code interpreter tool as used to configure an agent. + + + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'code_interpreter'. Required. 
Default value is + \"code_interpreter\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class CodeInterpreterToolResource(_model_base.Model): + """A set of resources that are used by the ``code_interpreter`` tool. + + :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can + be a maximum of 20 files + associated with the tool. + :vartype file_ids: list[str] + """ + + file_ids: Optional[List[str]] = rest_field() + """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of + 20 files + associated with the tool.""" + + @overload + def __init__( + self, + *, + file_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionListResource(_model_base.Model): + """A set of connection resources currently used by either the ``bing_grounding``\\ , + ``microsoft_fabric``\\ , or ``sharepoint`` tools. + + :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 + connection + resource attached to the agent. + :vartype connection_list: list[~azure.ai.projects.models.ConnectionResource] + """ + + connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") + """The connections attached to this agent. There can be a maximum of 1 connection + resource attached to the agent.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ConnectionResource"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionProperties(_model_base.Model): + """Connection properties. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth + + + :ivar auth_type: Authentication type of the connection target. Required. Known values are: + "ApiKey", "AAD", and "SAS". + :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + auth_type: str = rest_discriminator(name="authType") + """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", + and \"SAS\".""" + + +class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): + """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ + ). + + + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.projects.models.AAD + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". 
+ :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Entra ID authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + target: str = rest_field() + """The connection URL to be used for this service. Required.""" + + +class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): + """Connection properties for connections with API key authentication. + + + :ivar auth_type: Authentication type of the connection target. Required. API Key authentication + :vartype auth_type: str or ~azure.ai.projects.models.API_KEY + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. API Key authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() + """Credentials will only be present for authType=ApiKey. Required.""" + target: str = rest_field() + """The connection URL to be used for this service. Required.""" + + +class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): + """Connection properties for connections with SAS authentication. + + + :ivar auth_type: Authentication type of the connection target. Required. Shared Access + Signature (SAS) authentication + :vartype auth_type: str or ~azure.ai.projects.models.SAS + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar credentials: Credentials will only be present for authType=SAS. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Shared Access Signature (SAS) + authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + credentials: "_models._models.CredentialsSASAuth" = rest_field() + """Credentials will only be present for authType=SAS. Required.""" + target: str = rest_field() + """The connection URL to be used for this service.
Required.""" + + +class ConnectionResource(_model_base.Model): + """A connection resource. + + + :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field() + """A connection in a ConnectionListResource attached to this agent. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ConnectionsListResponse(_model_base.Model): + """Response from the list operation. + + + :ivar value: A list of connection list secrets. Required. + :vartype value: list[~azure.ai.projects.models._models.ConnectionsListSecretsResponse] + """ + + value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field() + """A list of connection list secrets. Required.""" + + +class ConnectionsListSecretsResponse(_model_base.Model): + """Response from the listSecrets operation. + + + :ivar id: A unique identifier for the connection. Required. + :vartype id: str + :ivar name: The name of the resource. Required. + :vartype name: str + :ivar properties: The properties of the resource. Required. + :vartype properties: ~azure.ai.projects.models._models.ConnectionProperties + """ + + id: str = rest_field() + """A unique identifier for the connection. Required.""" + name: str = rest_field() + """The name of the resource. Required.""" + properties: "_models._models.ConnectionProperties" = rest_field() + """The properties of the resource. Required.""" + + +class CredentialsApiKeyAuth(_model_base.Model): + """The credentials needed for API key authentication. + + + :ivar key: The API key. Required. + :vartype key: str + """ + + key: str = rest_field() + """The API key. Required.""" + + +class CredentialsSASAuth(_model_base.Model): + """The credentials needed for Shared Access Signature (SAS) authentication. + + + :ivar sas: The Shared Access Signature (SAS) token. Required. + :vartype sas: str + """ + + sas: str = rest_field(name="SAS") + """The Shared Access Signature (SAS) token. Required.""" + + +class Trigger(_model_base.Model): + """Abstract data class for trigger configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CronTrigger, RecurrenceTrigger + + + :ivar type: Type of the trigger. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """Type of the trigger. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CronTrigger(Trigger, discriminator="Cron"): + """Cron Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Cron". + :vartype type: str + :ivar expression: Cron expression for the trigger. Required.
+ :vartype expression: str + """ + + type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"Cron\".""" + expression: str = rest_field() + """Cron expression for the trigger. Required.""" + + @overload + def __init__( + self, + *, + expression: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="Cron", **kwargs) + + +class Dataset(InputData, discriminator="dataset"): + """Dataset as source for evaluation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "dataset". + :vartype type: str + :ivar id: Evaluation input data. Required. + :vartype id: str + """ + + type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore + """Required. Default value is \"dataset\".""" + id: str = rest_field() + """Evaluation input data. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="dataset", **kwargs) + + +class Evaluation(_model_base.Model): + """Evaluation Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: Identifier of the evaluation. Required. + :vartype id: str + :ivar data: Data for evaluation. Required. + :vartype data: ~azure.ai.projects.models.InputData + :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI + Studio. It does not need to be unique. + :vartype display_name: str + :ivar description: Description of the evaluation. It can be used to store additional + information about the evaluation and is mutable. + :vartype description: str + :ivar system_data: Metadata containing createdBy and modifiedBy information. + :vartype system_data: ~azure.ai.projects.models.SystemData + :ivar status: Status of the evaluation. It is set by service and is read-only. + :vartype status: str + :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. + :vartype tags: dict[str, str] + :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a + property cannot be removed. + :vartype properties: dict[str, str] + :ivar evaluators: Evaluators to be used for the evaluation. Required. + :vartype evaluators: dict[str, ~azure.ai.projects.models.EvaluatorConfiguration] + """ + + id: str = rest_field(visibility=["read"]) + """Identifier of the evaluation. Required.""" + data: "_models.InputData" = rest_field(visibility=["read", "create"]) + """Data for evaluation. Required.""" + display_name: Optional[str] = rest_field(name="displayName") + """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need + to be unique.""" + description: Optional[str] = rest_field() + """Description of the evaluation. 
It can be used to store additional information about the + evaluation and is mutable.""" + system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) + """Metadata containing createdBy and modifiedBy information.""" + status: Optional[str] = rest_field(visibility=["read"]) + """Status of the evaluation. It is set by service and is read-only.""" + tags: Optional[Dict[str, str]] = rest_field() + """Evaluation's tags. Unlike properties, tags are fully mutable.""" + properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) + """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be + removed.""" + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) + """Evaluators to be used for the evaluation. Required.""" + + @overload + def __init__( + self, + *, + data: "_models.InputData", + evaluators: Dict[str, "_models.EvaluatorConfiguration"], + display_name: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + properties: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EvaluationSchedule(_model_base.Model): + """Evaluation Schedule Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :vartype name: str + :ivar data: Data for evaluation. Required. + :vartype data: ~azure.ai.projects.models.ApplicationInsightsConfiguration + :ivar description: Description of the evaluation. It can be used to store additional + information about the evaluation and is mutable. + :vartype description: str + :ivar system_data: Metadata containing createdBy and modifiedBy information. + :vartype system_data: ~azure.ai.projects.models.SystemData + :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. + :vartype provisioning_status: str + :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. + :vartype tags: dict[str, str] + :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a + property cannot be removed. + :vartype properties: dict[str, str] + :ivar evaluators: Evaluators to be used for the evaluation. Required. + :vartype evaluators: dict[str, ~azure.ai.projects.models.EvaluatorConfiguration] + :ivar trigger: Trigger for the evaluation. Required. + :vartype trigger: ~azure.ai.projects.models.Trigger + :ivar sampling_strategy: Sampling strategy for the evaluation. Required. + :vartype sampling_strategy: ~azure.ai.projects.models.SamplingStrategy + """ + + name: str = rest_field(visibility=["read"]) + """Name of the schedule, which also serves as the unique identifier for the evaluation. Required.""" + data: "_models.ApplicationInsightsConfiguration" = rest_field(visibility=["read", "create"]) + """Data for evaluation. Required.""" + description: Optional[str] = rest_field() + """Description of the evaluation. 
It can be used to store additional information about the + evaluation and is mutable.""" + system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) + """Metadata containing createdBy and modifiedBy information.""" + provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"]) + """Status of the evaluation. It is set by service and is read-only.""" + tags: Optional[Dict[str, str]] = rest_field() + """Evaluation's tags. Unlike properties, tags are fully mutable.""" + properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) + """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be + removed.""" + evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) + """Evaluators to be used for the evaluation. Required.""" + trigger: "_models.Trigger" = rest_field() + """Trigger for the evaluation. Required.""" + sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") + """Sampling strategy for the evaluation. Required.""" + + @overload + def __init__( + self, + *, + data: "_models.ApplicationInsightsConfiguration", + evaluators: Dict[str, "_models.EvaluatorConfiguration"], + trigger: "_models.Trigger", + sampling_strategy: "_models.SamplingStrategy", + description: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, + properties: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EvaluatorConfiguration(_model_base.Model): + """Evaluator Configuration. + + + :ivar id: Identifier of the evaluator. Required. + :vartype id: str + :ivar init_params: Initialization parameters of the evaluator. + :vartype init_params: dict[str, any] + :ivar data_mapping: Data parameters of the evaluator. + :vartype data_mapping: dict[str, str] + """ + + id: str = rest_field() + """Identifier of the evaluator. Required.""" + init_params: Optional[Dict[str, Any]] = rest_field(name="initParams") + """Initialization parameters of the evaluator.""" + data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping") + """Data parameters of the evaluator.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + init_params: Optional[Dict[str, Any]] = None, + data_mapping: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileContentResponse(_model_base.Model): + """A response from a file get content operation. + + + :ivar content: The content of the file, in bytes. Required. + :vartype content: bytes + """ + + content: bytes = rest_field(format="base64") + """The content of the file, in bytes. Required.""" + + @overload + def __init__( + self, + *, + content: bytes, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FileDeletionStatus(_model_base.Model): + """A status response from a file deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'file'. Required. Default value is "file". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["file"] = rest_field() + """The object type, which is always 'file'. Required. Default value is \"file\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class FileListResponse(_model_base.Model): + """The response data from a file list operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always 'list'. Required. Default value is "list". + :vartype object: str + :ivar data: The files returned for the request. Required. + :vartype data: list[~azure.ai.projects.models.OpenAIFile] + """ + + object: Literal["list"] = rest_field() + """The object type, which is always 'list'. Required. Default value is \"list\".""" + data: List["_models.OpenAIFile"] = rest_field() + """The files returned for the request. Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.OpenAIFile"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): + """The input definition information for a file search tool as used to configure an agent. + + + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Options overrides for the file search tool. + :vartype file_search: ~azure.ai.projects.models.FileSearchToolDefinitionDetails + """ + + type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() + """Options overrides for the file search tool.""" + + @overload + def __init__( + self, + *, + file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
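+            A hypothetical raw-JSON sketch; ``type`` is fixed to
+            ``"file_search"`` by the discriminator, and ``file_search`` carries
+            the optional detail overrides.
+
+            .. code-block:: python
+
+                tool = FileSearchToolDefinition(
+                    {"type": "file_search", "file_search": {"max_num_results": 10}}
+                )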
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_search", **kwargs)
+
+
+class FileSearchToolDefinitionDetails(_model_base.Model):
+    """Options overrides for the file search tool.
+
+    :ivar max_num_results: The maximum number of results the file search tool should output. The
+     default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50
+     inclusive.
+
+     Note that the file search tool may output fewer than ``max_num_results`` results. See the file
+     search tool documentation for more information.
+    :vartype max_num_results: int
+    """
+
+    max_num_results: Optional[int] = rest_field()
+    """The maximum number of results the file search tool should output. The default is 20 for gpt-4*
+     models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
+
+     Note that the file search tool may output fewer than ``max_num_results`` results. See the file
+     search tool documentation for more information."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        max_num_results: Optional[int] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FileSearchToolResource(_model_base.Model):
+    """A set of resources that are used by the ``file_search`` tool.
+
+    :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a
+     maximum of 1 vector
+     store attached to the agent.
+    :vartype vector_store_ids: list[str]
+    """
+
+    vector_store_ids: Optional[List[str]] = rest_field()
+    """The ID of the vector store attached to this agent. There can be a maximum of 1 vector
+     store attached to the agent."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        vector_store_ids: Optional[List[str]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FunctionDefinition(_model_base.Model):
+    """The input definition information for a function.
+
+
+    :ivar name: The name of the function to be called. Required.
+    :vartype name: str
+    :ivar description: A description of what the function does, used by the model to choose when
+     and how to call the function.
+    :vartype description: str
+    :ivar parameters: The parameters the function accepts, described as a JSON Schema object.
+     Required.
+    :vartype parameters: any
+    """
+
+    name: str = rest_field()
+    """The name of the function to be called. Required."""
+    description: Optional[str] = rest_field()
+    """A description of what the function does, used by the model to choose when and how to call the
+     function."""
+    parameters: Any = rest_field()
+    """The parameters the function accepts, described as a JSON Schema object. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        parameters: Any,
+        description: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
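+            A hypothetical raw-JSON sketch mirroring the documented fields;
+            ``parameters`` is a JSON Schema object, and the function name shown
+            here is an assumed example.
+
+            .. code-block:: python
+
+                func = FunctionDefinition({
+                    "name": "get_weather",
+                    "description": "Gets the current weather for a city.",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {"city": {"type": "string"}},
+                        "required": ["city"],
+                    },
+                })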
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FunctionName(_model_base.Model):
+    """The function name that will be used, if using the ``function`` tool.
+
+
+    :ivar name: The name of the function to call. Required.
+    :vartype name: str
+    """
+
+    name: str = rest_field()
+    """The name of the function to call. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class FunctionToolDefinition(ToolDefinition, discriminator="function"):
+    """The input definition information for a function tool as used to configure an agent.
+
+
+    :ivar type: The object type, which is always 'function'. Required. Default value is "function".
+    :vartype type: str
+    :ivar function: The definition of the concrete function that the function tool should call.
+     Required.
+    :vartype function: ~azure.ai.projects.models.FunctionDefinition
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'function'. Required. Default value is \"function\"."""
+    function: "_models.FunctionDefinition" = rest_field()
+    """The definition of the concrete function that the function tool should call. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        function: "_models.FunctionDefinition",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="function", **kwargs)
+
+
+class IndexResource(_model_base.Model):
+    """An Index resource.
+
+
+    :ivar index_connection_id: An index connection id in an IndexResource attached to this agent.
+     Required.
+    :vartype index_connection_id: str
+    :ivar index_name: The name of an index in an IndexResource attached to this agent. Required.
+    :vartype index_name: str
+    """
+
+    index_connection_id: str = rest_field()
+    """An index connection id in an IndexResource attached to this agent. Required."""
+    index_name: str = rest_field()
+    """The name of an index in an IndexResource attached to this agent. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index_connection_id: str,
+        index_name: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageAttachment(_model_base.Model):
+    """This describes to which tools a file has been attached.
+
+
+    :ivar file_id: The ID of the file to attach to the message. Required.
+    :vartype file_id: str
+    :ivar tools: The tools to add to this file. Required.
+    :vartype tools: list[~azure.ai.projects.models.CodeInterpreterToolDefinition or
+     ~azure.ai.projects.models.FileSearchToolDefinition]
+    """
+
+    file_id: str = rest_field()
+    """The ID of the file to attach to the message. Required."""
+    tools: List["_types.MessageAttachmentToolDefinition"] = rest_field()
+    """The tools to add to this file.
Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + tools: List["_types.MessageAttachmentToolDefinition"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageContent(_model_base.Model): + """An abstract representation of a single item of thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageImageFileContent, MessageTextContent + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDelta(_model_base.Model): + """Represents the typed 'delta' payload within a streaming message delta chunk. + + + :ivar role: The entity that produced the message. Required. Known values are: "user" and + "assistant". + :vartype role: str or ~azure.ai.projects.models.MessageRole + :ivar content: The content of the message as an array of text and/or images. Required. + :vartype content: list[~azure.ai.projects.models.MessageDeltaContent] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" + content: List["_models.MessageDeltaContent"] = rest_field() + """The content of the message as an array of text and/or images. Required.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageDeltaContent"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaChunk(_model_base.Model): + """Represents a message delta i.e. any changed fields on a message during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.message.delta``. Required. Default + value is "thread.message.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the Message. Required. + :vartype delta: ~azure.ai.projects.models.MessageDelta + """ + + id: str = rest_field() + """The identifier of the message, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message.delta"] = rest_field() + """The object type, which is always ``thread.message.delta``. Required. Default value is + \"thread.message.delta\".""" + delta: "_models.MessageDelta" = rest_field() + """The delta containing the fields that have changed on the Message. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + delta: "_models.MessageDelta", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message.delta"] = "thread.message.delta" + + +class MessageDeltaContent(_model_base.Model): + """The abstract base representation of a partial streamed message content payload. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageDeltaImageFileContent, MessageDeltaTextContent + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + index: int = rest_field() + """The index of the content part of the message. Required.""" + type: str = rest_discriminator(name="type") + """The type of content for this content part. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + index: int, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"): + """Represents a streamed image file content part within a streaming message delta chunk. + + + :ivar index: The index of the content part of the message. Required. + :vartype index: int + :ivar type: The type of content for this content part, which is always "image_file.". Required. + Default value is "image_file". + :vartype type: str + :ivar image_file: The image_file data. + :vartype image_file: ~azure.ai.projects.models.MessageDeltaImageFileContentObject + """ + + type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore + """The type of content for this content part, which is always \"image_file.\". Required. Default + value is \"image_file\".""" + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field() + """The image_file data.""" + + @overload + def __init__( + self, + *, + index: int, + image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageDeltaImageFileContentObject(_model_base.Model): + """Represents the 'image_file' payload within streaming image file content. + + :ivar file_id: The file ID of the image in the message content. + :vartype file_id: str + """ + + file_id: Optional[str] = rest_field() + """The file ID of the image in the message content.""" + + @overload + def __init__( + self, + *, + file_id: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextAnnotation(_model_base.Model):
+    """The abstract base representation of a streamed text content part's text annotation.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the annotation within a text content part. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the text content annotation. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"):
+    """Represents a streamed text content part within a streaming message delta chunk.
+
+
+    :ivar index: The index of the content part of the message. Required.
+    :vartype index: int
+    :ivar type: The type of content for this content part, which is always "text". Required.
+     Default value is "text".
+    :vartype type: str
+    :ivar text: The text content details.
+    :vartype text: ~azure.ai.projects.models.MessageDeltaTextContentObject
+    """
+
+    type: Literal["text"] = rest_discriminator(name="type")  # type: ignore
+    """The type of content for this content part, which is always \"text\". Required. Default value
+     is \"text\"."""
+    text: Optional["_models.MessageDeltaTextContentObject"] = rest_field()
+    """The text content details."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        text: Optional["_models.MessageDeltaTextContentObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="text", **kwargs)
+
+
+class MessageDeltaTextContentObject(_model_base.Model):
+    """Represents the data of a streamed text content part within a streaming message delta chunk.
+
+    :ivar value: The data that makes up the text.
+    :vartype value: str
+    :ivar annotations: Annotations for the text.
+    :vartype annotations: list[~azure.ai.projects.models.MessageDeltaTextAnnotation]
+    """
+
+    value: Optional[str] = rest_field()
+    """The data that makes up the text."""
+    annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field()
+    """Annotations for the text."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        value: Optional[str] = None,
+        annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"):
+    """Represents a streamed file citation applied to a streaming text content part.
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation, which is always "file_citation".
+     Required. Default value is "file_citation".
+    :vartype type: str
+    :ivar file_citation: The file citation information.
+    :vartype file_citation: ~azure.ai.projects.models.MessageDeltaTextFileCitationAnnotationObject
+    :ivar text: The text in the message content that needs to be replaced.
+    :vartype text: str
+    :ivar start_index: The start index of this annotation in the content text.
+    :vartype start_index: int
+    :ivar end_index: The end index of this annotation in the content text.
+    :vartype end_index: int
+    """
+
+    type: Literal["file_citation"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the text content annotation, which is always \"file_citation\". Required. Default
+     value is \"file_citation\"."""
+    file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field()
+    """The file citation information."""
+    text: Optional[str] = rest_field()
+    """The text in the message content that needs to be replaced."""
+    start_index: Optional[int] = rest_field()
+    """The start index of this annotation in the content text."""
+    end_index: Optional[int] = rest_field()
+    """The end index of this annotation in the content text."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None,
+        text: Optional[str] = None,
+        start_index: Optional[int] = None,
+        end_index: Optional[int] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_citation", **kwargs)
+
+
+class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the data of a streamed file citation as applied to a streaming text content part.
+
+    :ivar file_id: The ID of the specific file the citation is from.
+    :vartype file_id: str
+    :ivar quote: The specific quote in the cited file.
+    :vartype quote: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The ID of the specific file the citation is from."""
+    quote: Optional[str] = rest_field()
+    """The specific quote in the cited file."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+        quote: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"):
+    """Represents a streamed file path annotation applied to a streaming text content part.
+
+
+    :ivar index: The index of the annotation within a text content part. Required.
+    :vartype index: int
+    :ivar type: The type of the text content annotation, which is always "file_path". Required.
+     Default value is "file_path".
+    :vartype type: str
+    :ivar file_path: The file path information.
+    :vartype file_path: ~azure.ai.projects.models.MessageDeltaTextFilePathAnnotationObject
+    :ivar start_index: The start index of this annotation in the content text.
+    :vartype start_index: int
+    :ivar end_index: The end index of this annotation in the content text.
+    :vartype end_index: int
+    :ivar text: The text in the message content that needs to be replaced.
+    :vartype text: str
+    """
+
+    type: Literal["file_path"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the text content annotation, which is always \"file_path\". Required. Default
+     value is \"file_path\"."""
+    file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field()
+    """The file path information."""
+    start_index: Optional[int] = rest_field()
+    """The start index of this annotation in the content text."""
+    end_index: Optional[int] = rest_field()
+    """The end index of this annotation in the content text."""
+    text: Optional[str] = rest_field()
+    """The text in the message content that needs to be replaced."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None,
+        start_index: Optional[int] = None,
+        end_index: Optional[int] = None,
+        text: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_path", **kwargs)
+
+
+class MessageDeltaTextFilePathAnnotationObject(_model_base.Model):
+    """Represents the data of a streamed file path annotation as applied to a streaming text content
+    part.
+
+    :ivar file_id: The file ID for the annotation.
+    :vartype file_id: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The file ID for the annotation."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MessageImageFileContent(MessageContent, discriminator="image_file"):
+    """A representation of image file content in a thread message.
+
+
+    :ivar type: The object type, which is always 'image_file'. Required. Default value is
+     "image_file".
+    :vartype type: str
+    :ivar image_file: The image file for this thread message content item. Required.
+    :vartype image_file: ~azure.ai.projects.models.MessageImageFileDetails
+    """
+
+    type: Literal["image_file"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'image_file'. Required. Default value is \"image_file\"."""
+    image_file: "_models.MessageImageFileDetails" = rest_field()
+    """The image file for this thread message content item. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        image_file: "_models.MessageImageFileDetails",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image_file", **kwargs) + + +class MessageImageFileDetails(_model_base.Model): + """An image reference, as represented in thread message content. + + + :ivar file_id: The ID for the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID for the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageIncompleteDetails(_model_base.Model): + """Information providing additional detail about a message entering an incomplete status. + + + :ivar reason: The provided reason describing why the message was marked as incomplete. + Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and + "run_expired". + :vartype reason: str or ~azure.ai.projects.models.MessageIncompleteDetailsReason + """ + + reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() + """The provided reason describing why the message was marked as incomplete. Required. Known values + are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and + \"run_expired\".""" + + @overload + def __init__( + self, + *, + reason: Union[str, "_models.MessageIncompleteDetailsReason"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextAnnotation(_model_base.Model): + """An abstract representation of an annotation to text thread message content. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + text: str = rest_field() + """The textual content associated with this text annotation item. Required.""" + + @overload + def __init__( + self, + *, + type: str, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextContent(MessageContent, discriminator="text"): + """A representation of a textual item of thread message content. + + + :ivar type: The object type, which is always 'text'. Required. Default value is "text". + :vartype type: str + :ivar text: The text and associated annotations for this thread message content item. Required. 
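+
+     A hedged reading sketch, assuming ``content`` is one item of a thread
+     message's content list:
+
+     .. code-block:: python
+
+        if isinstance(content, MessageTextContent):
+            print(content.text.value)  # annotations live in content.text.annotations
+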
+ :vartype text: ~azure.ai.projects.models.MessageTextDetails + """ + + type: Literal["text"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'text'. Required. Default value is \"text\".""" + text: "_models.MessageTextDetails" = rest_field() + """The text and associated annotations for this thread message content item. Required.""" + + @overload + def __init__( + self, + *, + text: "_models.MessageTextDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="text", **kwargs) + + +class MessageTextDetails(_model_base.Model): + """The text and associated annotations for a single item of agent thread message content. + + + :ivar value: The text data. Required. + :vartype value: str + :ivar annotations: A list of annotations associated with this text. Required. + :vartype annotations: list[~azure.ai.projects.models.MessageTextAnnotation] + """ + + value: str = rest_field() + """The text data. Required.""" + annotations: List["_models.MessageTextAnnotation"] = rest_field() + """A list of annotations associated with this text. Required.""" + + @overload + def __init__( + self, + *, + value: str, + annotations: List["_models.MessageTextAnnotation"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): + """A citation within the message that points to a specific quote from a specific File associated + with the agent or the message. Generated when the agent uses the 'file_search' tool to search + files. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_citation'. Required. Default value is + "file_citation". + :vartype type: str + :ivar file_citation: A citation within the message that points to a specific quote from a + specific file. + Generated when the agent uses the "file_search" tool to search files. Required. + :vartype file_citation: ~azure.ai.projects.models.MessageTextFileCitationDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" + file_citation: "_models.MessageTextFileCitationDetails" = rest_field() + """A citation within the message that points to a specific quote from a specific file. + Generated when the agent uses the \"file_search\" tool to search files. 
Required.""" + start_index: Optional[int] = rest_field() + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field() + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_citation: "_models.MessageTextFileCitationDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="file_citation", **kwargs) + + +class MessageTextFileCitationDetails(_model_base.Model): + """A representation of a file-based text citation, as used in a file-based annotation of text + thread message content. + + + :ivar file_id: The ID of the file associated with this citation. Required. + :vartype file_id: str + :ivar quote: The specific quote cited in the associated file. Required. + :vartype quote: str + """ + + file_id: str = rest_field() + """The ID of the file associated with this citation. Required.""" + quote: str = rest_field() + """The specific quote cited in the associated file. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + quote: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): + """A citation within the message that points to a file located at a specific path. + + + :ivar text: The textual content associated with this text annotation item. Required. + :vartype text: str + :ivar type: The object type, which is always 'file_path'. Required. Default value is + "file_path". + :vartype type: str + :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter + tool to generate a file. Required. + :vartype file_path: ~azure.ai.projects.models.MessageTextFilePathDetails + :ivar start_index: The first text index associated with this text annotation. + :vartype start_index: int + :ivar end_index: The last text index associated with this text annotation. + :vartype end_index: int + """ + + type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" + file_path: "_models.MessageTextFilePathDetails" = rest_field() + """A URL for the file that's generated when the agent used the code_interpreter tool to generate a + file. Required.""" + start_index: Optional[int] = rest_field() + """The first text index associated with this text annotation.""" + end_index: Optional[int] = rest_field() + """The last text index associated with this text annotation.""" + + @overload + def __init__( + self, + *, + text: str, + file_path: "_models.MessageTextFilePathDetails", + start_index: Optional[int] = None, + end_index: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_path", **kwargs)
+
+
+class MessageTextFilePathDetails(_model_base.Model):
+    """An encapsulation of a file ID, as used by a file path annotation on thread message content.
+
+
+    :ivar file_id: The ID of the specific file that the citation is from. Required.
+    :vartype file_id: str
+    """
+
+    file_id: str = rest_field()
+    """The ID of the specific file that the citation is from. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"):
+    """The input definition information for a Microsoft Fabric tool as used to configure an agent.
+
+
+    :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is
+     "microsoft_fabric".
+    :vartype type: str
+    """
+
+    type: Literal["microsoft_fabric"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'microsoft_fabric'. Required. Default value is
+     \"microsoft_fabric\"."""
+
+    @overload
+    def __init__(
+        self,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="microsoft_fabric", **kwargs)
+
+
+class OpenAIFile(_model_base.Model):
+    """Represents a file uploaded to the service.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar object: The object type, which is always 'file'. Required. Default value is "file".
+    :vartype object: str
+    :ivar id: The identifier, which can be referenced in API endpoints. Required.
+    :vartype id: str
+    :ivar bytes: The size of the file, in bytes. Required.
+    :vartype bytes: int
+    :ivar filename: The name of the file. Required.
+    :vartype filename: str
+    :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
+     Required.
+    :vartype created_at: ~datetime.datetime
+    :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune",
+     "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision".
+    :vartype purpose: str or ~azure.ai.projects.models.FilePurpose
+    :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values
+     are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted".
+    :vartype status: str or ~azure.ai.projects.models.FileState
+    :ivar status_details: The error message with details in case processing of this file failed.
+     This field is available in Azure OpenAI only.
+    :vartype status_details: str
+    """
+
+    object: Literal["file"] = rest_field()
+    """The object type, which is always 'file'. Required. Default value is \"file\"."""
+    id: str = rest_field()
+    """The identifier, which can be referenced in API endpoints. Required."""
+    bytes: int = rest_field()
+    """The size of the file, in bytes. Required."""
+    filename: str = rest_field()
+    """The name of the file.
Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + purpose: Union[str, "_models.FilePurpose"] = rest_field() + """The intended purpose of a file. Required. Known values are: \"fine-tune\", + \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and + \"vision\".""" + status: Optional[Union[str, "_models.FileState"]] = rest_field() + """The state of the file. This field is available in Azure OpenAI only. Known values are: + \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and + \"deleted\".""" + status_details: Optional[str] = rest_field() + """The error message with details in case processing of this file failed. This field is available + in Azure OpenAI only.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bytes: int, + filename: str, + created_at: datetime.datetime, + purpose: Union[str, "_models.FilePurpose"], + status: Optional[Union[str, "_models.FileState"]] = None, + status_details: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["file"] = "file" + + +class OpenAIPageableListOfAgent(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.Agent] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.Agent"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.Agent"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfRunStep(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". 
+ :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.RunStep] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.RunStep"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.RunStep"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadMessage(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.ThreadMessage] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadMessage"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadMessage"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfThreadRun(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. 
Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.ThreadRun] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.ThreadRun"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.ThreadRun"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStore(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.VectorStore] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStore"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStore"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class OpenAIPageableListOfVectorStoreFile(_model_base.Model): + """The response data for a requested list of items. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar object: The object type, which is always list. 
Required. Default value is "list". + :vartype object: str + :ivar data: The requested list of items. Required. + :vartype data: list[~azure.ai.projects.models.VectorStoreFile] + :ivar first_id: The first ID represented in this list. Required. + :vartype first_id: str + :ivar last_id: The last ID represented in this list. Required. + :vartype last_id: str + :ivar has_more: A value indicating whether there are additional values available not captured + in this list. Required. + :vartype has_more: bool + """ + + object: Literal["list"] = rest_field() + """The object type, which is always list. Required. Default value is \"list\".""" + data: List["_models.VectorStoreFile"] = rest_field() + """The requested list of items. Required.""" + first_id: str = rest_field() + """The first ID represented in this list. Required.""" + last_id: str = rest_field() + """The last ID represented in this list. Required.""" + has_more: bool = rest_field() + """A value indicating whether there are additional values available not captured in this list. + Required.""" + + @overload + def __init__( + self, + *, + data: List["_models.VectorStoreFile"], + first_id: str, + last_id: str, + has_more: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["list"] = "list" + + +class RecurrenceSchedule(_model_base.Model): + """RecurrenceSchedule Definition. + + + :ivar hours: List of hours for the schedule. Required. + :vartype hours: list[int] + :ivar minutes: List of minutes for the schedule. Required. + :vartype minutes: list[int] + :ivar week_days: List of days for the schedule. + :vartype week_days: list[str or ~azure.ai.projects.models.WeekDays] + :ivar month_days: List of month days for the schedule. + :vartype month_days: list[int] + """ + + hours: List[int] = rest_field() + """List of hours for the schedule. Required.""" + minutes: List[int] = rest_field() + """List of minutes for the schedule. Required.""" + week_days: Optional[List[Union[str, "_models.WeekDays"]]] = rest_field(name="weekDays") + """List of days for the schedule.""" + month_days: Optional[List[int]] = rest_field(name="monthDays") + """List of month days for the schedule.""" + + @overload + def __init__( + self, + *, + hours: List[int], + minutes: List[int], + week_days: Optional[List[Union[str, "_models.WeekDays"]]] = None, + month_days: Optional[List[int]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RecurrenceTrigger(Trigger, discriminator="Recurrence"): + """Recurrence Trigger Definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar type: Required. Default value is "Recurrence". + :vartype type: str + :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", + "Week", "Day", "Hour", and "Minute". + :vartype frequency: str or ~azure.ai.projects.models.Frequency + :ivar interval: Specifies schedule interval in conjunction with frequency. Required. + :vartype interval: int + :ivar schedule: The recurrence schedule. 
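+
+     A minimal construction sketch with hypothetical values; ``frequency``
+     takes the known values listed above:
+
+     .. code-block:: python
+
+        trigger = RecurrenceTrigger(
+            frequency="Week",
+            interval=1,
+            schedule=RecurrenceSchedule(hours=[9], minutes=[0]),
+        )
+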
+    :vartype schedule: ~azure.ai.projects.models.RecurrenceSchedule
+    """
+
+    type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"])  # type: ignore
+    """Required. Default value is \"Recurrence\"."""
+    frequency: Union[str, "_models.Frequency"] = rest_field()
+    """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\",
+     \"Hour\", and \"Minute\"."""
+    interval: int = rest_field()
+    """Specifies schedule interval in conjunction with frequency. Required."""
+    schedule: Optional["_models.RecurrenceSchedule"] = rest_field()
+    """The recurrence schedule."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        frequency: Union[str, "_models.Frequency"],
+        interval: int,
+        schedule: Optional["_models.RecurrenceSchedule"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="Recurrence", **kwargs)
+
+
+class RequiredAction(_model_base.Model):
+    """An abstract representation of a required action for an agent thread run to continue.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    SubmitToolOutputsAction
+
+
+    :ivar type: The object type. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RequiredToolCall(_model_base.Model):
+    """An abstract representation of a tool invocation needed by the model to continue a run.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RequiredFunctionToolCall
+
+
+    :ivar type: The object type for the required tool call. Required. Default value is None.
+    :vartype type: str
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the required tool call. Required. Default value is None."""
+    id: str = rest_field()
+    """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+        id: str,  # pylint: disable=redefined-builtin
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
+    """A representation of a requested call to a function tool, needed by the model to continue
+    evaluation of a run.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type of the required tool call.
+class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
+    """A representation of a requested call to a function tool, needed by the model to continue
+    evaluation of a run.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type of the required tool call. Always 'function' for function tools.
+     Required. Default value is "function".
+    :vartype type: str
+    :ivar function: Detailed information about the function to be executed by the tool that
+     includes name and arguments. Required.
+    :vartype function: ~azure.ai.projects.models.RequiredFunctionToolCallDetails
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type of the required tool call. Always 'function' for function tools. Required.
+     Default value is \"function\"."""
+    function: "_models.RequiredFunctionToolCallDetails" = rest_field()
+    """Detailed information about the function to be executed by the tool that includes name and
+     arguments. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.RequiredFunctionToolCallDetails",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="function", **kwargs)
+
+
+class RequiredFunctionToolCallDetails(_model_base.Model):
+    """The detailed information for a function invocation, as provided by a required action invoking a
+    function tool, that includes the name of and arguments to the function.
+
+
+    :ivar name: The name of the function. Required.
+    :vartype name: str
+    :ivar arguments: The arguments to use when invoking the named function, as provided by the
+     model. Arguments are presented as a JSON document that should be validated and parsed for
+     evaluation. Required.
+    :vartype arguments: str
+    """
+
+    name: str = rest_field()
+    """The name of the function. Required."""
+    arguments: str = rest_field()
+    """The arguments to use when invoking the named function, as provided by the model. Arguments are
+     presented as a JSON document that should be validated and parsed for evaluation. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        arguments: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunCompletionUsage(_model_base.Model):
+    """Usage statistics related to the run. This value will be ``null`` if the run is not in a
+    terminal state (e.g. ``in_progress``\\ , ``queued``\\ , etc.).
+
+
+    :ivar completion_tokens: Number of completion tokens used over the course of the run. Required.
+    :vartype completion_tokens: int
+    :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required.
+    :vartype prompt_tokens: int
+    :ivar total_tokens: Total number of tokens used (prompt + completion). Required.
+    :vartype total_tokens: int
+    """
+
+    completion_tokens: int = rest_field()
+    """Number of completion tokens used over the course of the run. Required."""
+    prompt_tokens: int = rest_field()
+    """Number of prompt tokens used over the course of the run. Required."""
+    total_tokens: int = rest_field()
+    """Total number of tokens used (prompt + completion). Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        completion_tokens: int,
+        prompt_tokens: int,
+        total_tokens: int,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunError(_model_base.Model): + """The details of an error as encountered by an agent thread run. + + + :ivar code: The status for the error. Required. + :vartype code: str + :ivar message: The human-readable text associated with the error. Required. + :vartype message: str + """ + + code: str = rest_field() + """The status for the error. Required.""" + message: str = rest_field() + """The human-readable text associated with the error. Required.""" + + @overload + def __init__( + self, + *, + code: str, + message: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStep(_model_base.Model): + """Detailed information about a single step of an agent thread run. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is + "thread.run.step". + :vartype object: str + :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. + Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.projects.models.RunStepType + :ivar assistant_id: The ID of the agent associated with the run step. Required. + :vartype assistant_id: str + :ivar thread_id: The ID of the thread that was run. Required. + :vartype thread_id: str + :ivar run_id: The ID of the run that this run step is a part of. Required. + :vartype run_id: str + :ivar status: The status of this run step. Required. Known values are: "in_progress", + "cancelled", "failed", "completed", and "expired". + :vartype status: str or ~azure.ai.projects.models.RunStepStatus + :ivar step_details: The details for this run step. Required. + :vartype step_details: ~azure.ai.projects.models.RunStepDetails + :ivar last_error: If applicable, information about the last error encountered by this run step. + Required. + :vartype last_error: ~azure.ai.projects.models.RunStepError + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. + Required. + :vartype expired_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the + run step's status is ``in_progress``. + :vartype usage: ~azure.ai.projects.models.RunStepCompletionUsage + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step"] = rest_field() + """The object type, which is always 'thread.run.step'. Required. Default value is + \"thread.run.step\".""" + type: Union[str, "_models.RunStepType"] = rest_field() + """The type of run step, which can be either message_creation or tool_calls. Required. Known + values are: \"message_creation\" and \"tool_calls\".""" + assistant_id: str = rest_field() + """The ID of the agent associated with the run step. Required.""" + thread_id: str = rest_field() + """The ID of the thread that was run. Required.""" + run_id: str = rest_field() + """The ID of the run that this run step is a part of. Required.""" + status: Union[str, "_models.RunStepStatus"] = rest_field() + """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", + \"failed\", \"completed\", and \"expired\".""" + step_details: "_models.RunStepDetails" = rest_field() + """The details for this run step. Required.""" + last_error: "_models.RunStepError" = rest_field() + """If applicable, information about the last error encountered by this run step. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expired_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item expired. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this failed. Required.""" + usage: Optional["_models.RunStepCompletionUsage"] = rest_field() + """Usage statistics related to the run step. This value will be ``null`` while the run step's + status is ``in_progress``.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + type: Union[str, "_models.RunStepType"], + assistant_id: str, + thread_id: str, + run_id: str, + status: Union[str, "_models.RunStepStatus"], + step_details: "_models.RunStepDetails", + last_error: "_models.RunStepError", + created_at: datetime.datetime, + expired_at: datetime.datetime, + completed_at: datetime.datetime, + cancelled_at: datetime.datetime, + failed_at: datetime.datetime, + metadata: Dict[str, str], + usage: Optional["_models.RunStepCompletionUsage"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.run.step"] = "thread.run.step" + + +class RunStepToolCall(_model_base.Model): + """An abstract representation of a detailed tool call as recorded within a run step for an + existing run. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, + RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, + RunStepSharepointToolCall + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + id: str = rest_field() + """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + type: str, + id: str, # pylint: disable=redefined-builtin + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"): + """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined + tool, that represents + executed Azure AI search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is + "azure_ai_search". + :vartype type: str + :ivar azure_ai_search: Reserved for future use. Required. + :vartype azure_ai_search: dict[str, str] + """ + + type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'azure_ai_search'. Required. Default value is + \"azure_ai_search\".""" + azure_ai_search: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + azure_ai_search: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="azure_ai_search", **kwargs) + + +class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): + """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined + tool, that represents + executed search with bing grounding. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is + "bing_grounding". + :vartype type: str + :ivar bing_grounding: Reserved for future use. Required. 
+ :vartype bing_grounding: dict[str, str] + """ + + type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'bing_grounding'. Required. Default value is + \"bing_grounding\".""" + bing_grounding: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + bing_grounding: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="bing_grounding", **kwargs) + + +class RunStepCodeInterpreterToolCallOutput(_model_base.Model): + """An abstract representation of an emitted output from a code interpreter tool. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput + + + :ivar type: The object type. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): + """A representation of an image output emitted by a code interpreter tool in response to a tool + call by the model. + + + :ivar type: The object type, which is always 'image'. Required. Default value is "image". + :vartype type: str + :ivar image: Referential information for the image associated with this output. Required. + :vartype image: ~azure.ai.projects.models.RunStepCodeInterpreterImageReference + """ + + type: Literal["image"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'image'. Required. Default value is \"image\".""" + image: "_models.RunStepCodeInterpreterImageReference" = rest_field() + """Referential information for the image associated with this output. Required.""" + + @overload + def __init__( + self, + *, + image: "_models.RunStepCodeInterpreterImageReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="image", **kwargs) + + +class RunStepCodeInterpreterImageReference(_model_base.Model): + """An image reference emitted by a code interpreter tool in response to a tool call by the model. + + + :ivar file_id: The ID of the file associated with this image. Required. + :vartype file_id: str + """ + + file_id: str = rest_field() + """The ID of the file associated with this image. Required.""" + + @overload + def __init__( + self, + *, + file_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): + """A representation of a log output emitted by a code interpreter tool in response to a tool call + by the model. + + + :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". + :vartype type: str + :ivar logs: The serialized log output emitted by the code interpreter. Required. + :vartype logs: str + """ + + type: Literal["logs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'logs'. Required. Default value is \"logs\".""" + logs: str = rest_field() + """The serialized log output emitted by the code interpreter. Required.""" + + @overload + def __init__( + self, + *, + logs: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="logs", **kwargs) + + +class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): + """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined + tool, that + represents inputs and outputs consumed and emitted by the code interpreter. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is + "code_interpreter". + :vartype type: str + :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. + :vartype code_interpreter: ~azure.ai.projects.models.RunStepCodeInterpreterToolCallDetails + """ + + type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'code_interpreter'. Required. Default value is + \"code_interpreter\".""" + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() + """The details of the tool call to the code interpreter tool. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="code_interpreter", **kwargs) + + +class RunStepCodeInterpreterToolCallDetails(_model_base.Model): + """The detailed information about a code interpreter invocation by the model. + + + :ivar input: The input provided by the model to the code interpreter tool. Required. + :vartype input: str + :ivar outputs: The outputs produced by the code interpreter tool back to the model in response + to the tool call. Required. + :vartype outputs: list[~azure.ai.projects.models.RunStepCodeInterpreterToolCallOutput] + """ + + input: str = rest_field() + """The input provided by the model to the code interpreter tool. Required.""" + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() + """The outputs produced by the code interpreter tool back to the model in response to the tool + call. 
Required.""" + + @overload + def __init__( + self, + *, + input: str, + outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepCompletionUsage(_model_base.Model): + """Usage statistics related to the run step. + + + :ivar completion_tokens: Number of completion tokens used over the course of the run step. + Required. + :vartype completion_tokens: int + :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. + :vartype prompt_tokens: int + :ivar total_tokens: Total number of tokens used (prompt + completion). Required. + :vartype total_tokens: int + """ + + completion_tokens: int = rest_field() + """Number of completion tokens used over the course of the run step. Required.""" + prompt_tokens: int = rest_field() + """Number of prompt tokens used over the course of the run step. Required.""" + total_tokens: int = rest_field() + """Total number of tokens used (prompt + completion). Required.""" + + @overload + def __init__( + self, + *, + completion_tokens: int, + prompt_tokens: int, + total_tokens: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDelta(_model_base.Model): + """Represents the delta payload in a streaming run step delta chunk. + + :ivar step_details: The details of the run step. + :vartype step_details: ~azure.ai.projects.models.RunStepDeltaDetail + """ + + step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() + """The details of the run step.""" + + @overload + def __init__( + self, + *, + step_details: Optional["_models.RunStepDeltaDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepDeltaChunk(_model_base.Model): + """Represents a run step delta i.e. any changed fields on a run step during streaming. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default + value is "thread.run.step.delta". + :vartype object: str + :ivar delta: The delta containing the fields that have changed on the run step. Required. + :vartype delta: ~azure.ai.projects.models.RunStepDelta + """ + + id: str = rest_field() + """The identifier of the run step, which can be referenced in API endpoints. Required.""" + object: Literal["thread.run.step.delta"] = rest_field() + """The object type, which is always ``thread.run.step.delta``. Required. Default value is + \"thread.run.step.delta\".""" + delta: "_models.RunStepDelta" = rest_field() + """The delta containing the fields that have changed on the run step. 
Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        delta: "_models.RunStepDelta",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta"
+
+
+class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the Code Interpreter tool call data in a streaming run step's tool calls.
+
+    :ivar input: The input into the Code Interpreter tool call.
+    :vartype input: str
+    :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one
+     or more
+     items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these is
+     represented by a
+     different object type.
+    :vartype outputs: list[~azure.ai.projects.models.RunStepDeltaCodeInterpreterOutput]
+    """
+
+    input: Optional[str] = rest_field()
+    """The input into the Code Interpreter tool call."""
+    outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field()
+    """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more
+     items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these is represented
+     by a
+     different object type."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        input: Optional[str] = None,
+        outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterOutput(_model_base.Model):
+    """The abstract base representation of a streaming run step tool call's Code Interpreter tool
+    output.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required.
+     Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the output in the streaming run step tool call's Code Interpreter outputs array.
+     Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the streaming run step tool call's Code Interpreter output. Required. Default value
+     is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
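+
+
+# A minimal sketch of consuming streamed Code Interpreter outputs (illustrative only;
+# ``chunk`` is assumed to be a RunStepDeltaChunk received from a streaming response):
+#
+#     details = chunk.delta.step_details
+#     if isinstance(details, RunStepDeltaToolCallObject) and details.tool_calls:
+#         for tool_call in details.tool_calls:
+#             if isinstance(tool_call, RunStepDeltaCodeInterpreterToolCall) and tool_call.code_interpreter:
+#                 for output in tool_call.code_interpreter.outputs or []:
+#                     if isinstance(output, RunStepDeltaCodeInterpreterLogOutput):
+#                         print(output.logs)
+#                     elif isinstance(output, RunStepDeltaCodeInterpreterImageOutput) and output.image:
+#                         print(output.image.file_id)
+
+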
+class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"):
+    """Represents an image output as produced by the Code Interpreter tool and as represented in a
+    streaming run step's delta tool calls collection.
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The object type, which is always "image". Required. Default value is "image".
+    :vartype type: str
+    :ivar image: The image data for the Code Interpreter tool call output.
+    :vartype image: ~azure.ai.projects.models.RunStepDeltaCodeInterpreterImageOutputObject
+    """
+
+    type: Literal["image"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"image\". Required. Default value is \"image\"."""
+    image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field()
+    """The image data for the Code Interpreter tool call output."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="image", **kwargs)
+
+
+class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model):  # pylint: disable=name-too-long
+    """Represents the data for a streaming run step's Code Interpreter tool call image output.
+
+    :ivar file_id: The file ID for the image.
+    :vartype file_id: str
+    """
+
+    file_id: Optional[str] = rest_field()
+    """The file ID for the image."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        file_id: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"):
+    """Represents a log output as produced by the Code Interpreter tool and as represented in a
+    streaming run step's delta tool calls collection.
+
+
+    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
+     outputs array. Required.
+    :vartype index: int
+    :ivar type: The type of the object, which is always "logs". Required. Default value is "logs".
+    :vartype type: str
+    :ivar logs: The text output from the Code Interpreter tool call.
+    :vartype logs: str
+    """
+
+    type: Literal["logs"] = rest_discriminator(name="type")  # type: ignore
+    """The type of the object, which is always \"logs\". Required. Default value is \"logs\"."""
+    logs: Optional[str] = rest_field()
+    """The text output from the Code Interpreter tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        logs: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="logs", **kwargs)
+
+
+class RunStepDeltaToolCall(_model_base.Model):
+    """The abstract base representation of a single tool call within a streaming run step's delta tool
+    call details.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall,
+    RunStepDeltaFunctionToolCall
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The type of the tool call detail item in a streaming run step's details. Required.
+     Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    index: int = rest_field()
+    """The index of the tool call detail in the run step's tool_calls array. Required."""
+    id: str = rest_field()
+    """The ID of the tool call, used when submitting outputs to the run. Required."""
+    type: str = rest_discriminator(name="type")
+    """The type of the tool call detail item in a streaming run step's details. Required. Default
+     value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"):
+    """Represents a Code Interpreter tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "code_interpreter". Required. Default value is
+     "code_interpreter".
+    :vartype type: str
+    :ivar code_interpreter: The Code Interpreter data for the tool call.
+    :vartype code_interpreter:
+     ~azure.ai.projects.models.RunStepDeltaCodeInterpreterDetailItemObject
+    """
+
+    type: Literal["code_interpreter"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"code_interpreter\". Required. Default value is
+     \"code_interpreter\"."""
+    code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field()
+    """The Code Interpreter data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="code_interpreter", **kwargs)
+
+
+class RunStepDeltaDetail(_model_base.Model):
+    """Represents a single run step detail item in a streaming run step's delta payload.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    RunStepDeltaMessageCreation, RunStepDeltaToolCallObject
+
+
+    :ivar type: The object type for the run step detail object. Required. Default value is None.
+    :vartype type: str
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    type: str = rest_discriminator(name="type")
+    """The object type for the run step detail object. Required. Default value is None."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        type: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"):
+    """Represents a file search tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "file_search". Required. Default value is
+     "file_search".
+    :vartype type: str
+    :ivar file_search: Reserved for future use.
+    :vartype file_search: dict[str, str]
+    """
+
+    type: Literal["file_search"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"file_search\". Required. Default value is \"file_search\"."""
+    file_search: Optional[Dict[str, str]] = rest_field()
+    """Reserved for future use."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        file_search: Optional[Dict[str, str]] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_search", **kwargs)
+
+
+class RunStepDeltaFunction(_model_base.Model):
+    """Represents the function data in a streaming run step delta's function tool call.
+
+    :ivar name: The name of the function.
+    :vartype name: str
+    :ivar arguments: The arguments passed to the function as input.
+    :vartype arguments: str
+    :ivar output: The output of the function, null if outputs have not yet been submitted.
+    :vartype output: str
+    """
+
+    name: Optional[str] = rest_field()
+    """The name of the function."""
+    arguments: Optional[str] = rest_field()
+    """The arguments passed to the function as input."""
+    output: Optional[str] = rest_field()
+    """The output of the function, null if outputs have not yet been submitted."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: Optional[str] = None,
+        arguments: Optional[str] = None,
+        output: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"):
+    """Represents a function tool call within a streaming run step's tool call details.
+
+
+    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
+    :vartype index: int
+    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
+    :vartype id: str
+    :ivar type: The object type, which is always "function". Required. Default value is
+     "function".
+    :vartype type: str
+    :ivar function: The function data for the tool call.
+    :vartype function: ~azure.ai.projects.models.RunStepDeltaFunction
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"function\". Required. Default value is \"function\"."""
+    function: Optional["_models.RunStepDeltaFunction"] = rest_field()
+    """The function data for the tool call."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        index: int,
+        id: str,  # pylint: disable=redefined-builtin
+        function: Optional["_models.RunStepDeltaFunction"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="function", **kwargs)
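+
+
+# During streaming, a function tool call's ``arguments`` typically arrive as string
+# fragments spread across deltas, so a consumer concatenates them before parsing. A
+# minimal sketch (illustrative only; ``chunks`` is assumed to be an iterable of
+# RunStepDeltaChunk objects):
+#
+#     buffered_arguments = ""
+#     for chunk in chunks:
+#         details = chunk.delta.step_details
+#         if isinstance(details, RunStepDeltaToolCallObject) and details.tool_calls:
+#             for tool_call in details.tool_calls:
+#                 if isinstance(tool_call, RunStepDeltaFunctionToolCall) and tool_call.function:
+#                     buffered_arguments += tool_call.function.arguments or ""
+
+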
+class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"):
+    """Represents a message creation within a streaming run step delta.
+
+
+    :ivar type: The object type, which is always "message_creation". Required. Default value is
+     "message_creation".
+    :vartype type: str
+    :ivar message_creation: The message creation data.
+    :vartype message_creation: ~azure.ai.projects.models.RunStepDeltaMessageCreationObject
+    """
+
+    type: Literal["message_creation"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"message_creation\". Required. Default value is
+     \"message_creation\"."""
+    message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field()
+    """The message creation data."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="message_creation", **kwargs)
+
+
+class RunStepDeltaMessageCreationObject(_model_base.Model):
+    """Represents the data within a streaming run step message creation response object.
+
+    :ivar message_id: The ID of the newly-created message.
+    :vartype message_id: str
+    """
+
+    message_id: Optional[str] = rest_field()
+    """The ID of the newly-created message."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        message_id: Optional[str] = None,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"):
+    """Represents an invocation of tool calls as part of a streaming run step.
+
+
+    :ivar type: The object type, which is always "tool_calls". Required. Default value is
+     "tool_calls".
+    :vartype type: str
+    :ivar tool_calls: The collection of tool calls for the tool call detail item.
+    :vartype tool_calls: list[~azure.ai.projects.models.RunStepDeltaToolCall]
+    """
+
+    type: Literal["tool_calls"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always \"tool_calls\". Required.
Default value is \"tool_calls\".""" + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field() + """The collection of tool calls for the tool call detail item.""" + + @overload + def __init__( + self, + *, + tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="tool_calls", **kwargs) + + +class RunStepDetails(_model_base.Model): + """An abstract representation of the details for a run step. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + RunStepMessageCreationDetails, RunStepToolCallDetails + + + :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls". + :vartype type: str or ~azure.ai.projects.models.RunStepType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepError(_model_base.Model): + """The error information associated with a failed run step. + + + :ivar code: The error code for this error. Required. Known values are: "server_error" and + "rate_limit_exceeded". + :vartype code: str or ~azure.ai.projects.models.RunStepErrorCode + :ivar message: The human-readable text associated with this error. Required. + :vartype message: str + """ + + code: Union[str, "_models.RunStepErrorCode"] = rest_field() + """The error code for this error. Required. Known values are: \"server_error\" and + \"rate_limit_exceeded\".""" + message: str = rest_field() + """The human-readable text associated with this error. Required.""" + + @overload + def __init__( + self, + *, + code: Union[str, "_models.RunStepErrorCode"], + message: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"): + """A record of a call to a file search tool, issued by the model in evaluation of a defined tool, + that represents + executed file search. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'file_search'. Required. Default value is + "file_search". + :vartype type: str + :ivar file_search: Reserved for future use. Required. + :vartype file_search: dict[str, str] + """ + + type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" + file_search: Dict[str, str] = rest_field() + """Reserved for future use. 
Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        file_search: Dict[str, str],
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="file_search", **kwargs)
+
+
+class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"):
+    """A record of a call to a function tool, issued by the model in evaluation of a defined tool,
+    that represents the inputs
+    and output consumed and emitted by the specified function.
+
+
+    :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
+     Required.
+    :vartype id: str
+    :ivar type: The object type, which is always 'function'. Required. Default value is "function".
+    :vartype type: str
+    :ivar function: The detailed information about the function called by the model. Required.
+    :vartype function: ~azure.ai.projects.models.RunStepFunctionToolCallDetails
+    """
+
+    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'function'. Required. Default value is \"function\"."""
+    function: "_models.RunStepFunctionToolCallDetails" = rest_field()
+    """The detailed information about the function called by the model. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        function: "_models.RunStepFunctionToolCallDetails",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type="function", **kwargs)
+
+
+class RunStepFunctionToolCallDetails(_model_base.Model):
+    """The detailed information about the function called by the model.
+
+
+    :ivar name: The name of the function. Required.
+    :vartype name: str
+    :ivar arguments: The arguments that the model requires to be provided to the named function.
+     Required.
+    :vartype arguments: str
+    :ivar output: The output of the function, only populated for function calls that have already
+     had their outputs submitted. Required.
+    :vartype output: str
+    """
+
+    name: str = rest_field()
+    """The name of the function. Required."""
+    arguments: str = rest_field()
+    """The arguments that the model requires to be provided to the named function. Required."""
+    output: str = rest_field()
+    """The output of the function, only populated for function calls that have already had their
+     outputs submitted. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        arguments: str,
+        output: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"):
+    """The detailed information associated with a message creation run step.
+
+
+    :ivar type: The object type, which is always 'message_creation'. Required. Represents a run
+     step to create a message.
+ :vartype type: str or ~azure.ai.projects.models.MESSAGE_CREATION + :ivar message_creation: Information about the message creation associated with this run step. + Required. + :vartype message_creation: ~azure.ai.projects.models.RunStepMessageCreationReference + """ + + type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'message_creation'. Required. Represents a run step to create + a message.""" + message_creation: "_models.RunStepMessageCreationReference" = rest_field() + """Information about the message creation associated with this run step. Required.""" + + @overload + def __init__( + self, + *, + message_creation: "_models.RunStepMessageCreationReference", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs) + + +class RunStepMessageCreationReference(_model_base.Model): + """The details of a message created as a part of a run step. + + + :ivar message_id: The ID of the message created by this run step. Required. + :vartype message_id: str + """ + + message_id: str = rest_field() + """The ID of the message created by this run step. Required.""" + + @overload + def __init__( + self, + *, + message_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"): + """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined + tool, that represents + executed Microsoft Fabric operations. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is + "microsoft_fabric". + :vartype type: str + :ivar microsoft_fabric: Reserved for future use. Required. + :vartype microsoft_fabric: dict[str, str] + """ + + type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'microsoft_fabric'. Required. Default value is + \"microsoft_fabric\".""" + microsoft_fabric: Dict[str, str] = rest_field() + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + microsoft_fabric: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="microsoft_fabric", **kwargs) + + +class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): + """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, + that represents + executed SharePoint actions. + + + :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. + Required. + :vartype id: str + :ivar type: The object type, which is always 'sharepoint'. Required. 
Default value is + "sharepoint". + :vartype type: str + :ivar share_point: Reserved for future use. Required. + :vartype share_point: dict[str, str] + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + share_point: Dict[str, str] = rest_field(name="sharepoint") + """Reserved for future use. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + share_point: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint", **kwargs) + + +class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): + """The detailed information associated with a run step calling tools. + + + :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that + calls tools. + :vartype type: str or ~azure.ai.projects.models.TOOL_CALLS + :ivar tool_calls: A list of tool call details for this run step. Required. + :vartype tool_calls: list[~azure.ai.projects.models.RunStepToolCall] + """ + + type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'tool_calls'. Required. Represents a run step that calls + tools.""" + tool_calls: List["_models.RunStepToolCall"] = rest_field() + """A list of tool call details for this run step. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RunStepToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) + + +class SamplingStrategy(_model_base.Model): + """SamplingStrategy Definition. + + + :ivar rate: Sampling rate. Required. + :vartype rate: float + """ + + rate: float = rest_field() + """Sampling rate. Required.""" + + @overload + def __init__( + self, + *, + rate: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): + """The input definition information for a sharepoint tool as used to configure an agent. + + + :ivar type: The object type, which is always 'sharepoint'. Required. Default value is + "sharepoint". + :vartype type: str + """ + + type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="sharepoint", **kwargs) + + +class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): + """The details for required tool calls that must be submitted for an agent thread run to continue. + + + :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is + "submit_tool_outputs". + :vartype type: str + :ivar submit_tool_outputs: The details describing tools that should be called to submit tool + outputs. Required. + :vartype submit_tool_outputs: ~azure.ai.projects.models.SubmitToolOutputsDetails + """ + + type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'submit_tool_outputs'. Required. Default value is + \"submit_tool_outputs\".""" + submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() + """The details describing tools that should be called to submit tool outputs. Required.""" + + @overload + def __init__( + self, + *, + submit_tool_outputs: "_models.SubmitToolOutputsDetails", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="submit_tool_outputs", **kwargs) + + +class SubmitToolOutputsDetails(_model_base.Model): + """The details describing tools that should be called to submit tool outputs. + + + :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to + continue. Required. + :vartype tool_calls: list[~azure.ai.projects.models.RequiredToolCall] + """ + + tool_calls: List["_models.RequiredToolCall"] = rest_field() + """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" + + @overload + def __init__( + self, + *, + tool_calls: List["_models.RequiredToolCall"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SystemData(_model_base.Model): + """Metadata pertaining to creation and last modification of the resource. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar created_at: The timestamp the resource was created at. + :vartype created_at: ~datetime.datetime + :ivar created_by: The identity that created the resource. + :vartype created_by: str + :ivar created_by_type: The identity type that created the resource. + :vartype created_by_type: str + :ivar last_modified_at: The timestamp of resource last modification (UTC). 
+ :vartype last_modified_at: ~datetime.datetime + """ + + created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") + """The timestamp the resource was created at.""" + created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) + """The identity that created the resource.""" + created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) + """The identity type that created the resource.""" + last_modified_at: Optional[datetime.datetime] = rest_field( + name="lastModifiedAt", visibility=["read"], format="rfc3339" + ) + """The timestamp of resource last modification (UTC).""" + + +class ThreadDeletionStatus(_model_base.Model): + """The status of a thread deletion operation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The ID of the resource specified for deletion. Required. + :vartype id: str + :ivar deleted: A value indicating whether deletion was successful. Required. + :vartype deleted: bool + :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is + "thread.deleted". + :vartype object: str + """ + + id: str = rest_field() + """The ID of the resource specified for deletion. Required.""" + deleted: bool = rest_field() + """A value indicating whether deletion was successful. Required.""" + object: Literal["thread.deleted"] = rest_field() + """The object type, which is always 'thread.deleted'. Required. Default value is + \"thread.deleted\".""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + deleted: bool, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.deleted"] = "thread.deleted" + + +class ThreadMessage(_model_base.Model): + """A single, existing message within an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.message'. Required. Default value is + "thread.message". + :vartype object: str + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar thread_id: The ID of the thread that this message belongs to. Required. + :vartype thread_id: str + :ivar status: The status of the message. Required. Known values are: "in_progress", + "incomplete", and "completed". + :vartype status: str or ~azure.ai.projects.models.MessageStatus + :ivar incomplete_details: On an incomplete message, details about why the message is + incomplete. Required. + :vartype incomplete_details: ~azure.ai.projects.models.MessageIncompleteDetails + :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. + Required. + :vartype completed_at: ~datetime.datetime + :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as + incomplete. Required. + :vartype incomplete_at: ~datetime.datetime + :ivar role: The role associated with the agent thread message. Required. Known values are: + "user" and "assistant". 
+ :vartype role: str or ~azure.ai.projects.models.MessageRole + :ivar content: The list of content items associated with the agent thread message. Required. + :vartype content: list[~azure.ai.projects.models.MessageContent] + :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. + :vartype assistant_id: str + :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. + Required. + :vartype run_id: str + :ivar attachments: A list of files attached to the message, and the tools they were added to. + Required. + :vartype attachments: list[~azure.ai.projects.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["thread.message"] = rest_field() + """The object type, which is always 'thread.message'. Required. Default value is + \"thread.message\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + thread_id: str = rest_field() + """The ID of the thread that this message belongs to. Required.""" + status: Union[str, "_models.MessageStatus"] = rest_field() + """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and + \"completed\".""" + incomplete_details: "_models.MessageIncompleteDetails" = rest_field() + """On an incomplete message, details about why the message is incomplete. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was completed. Required.""" + incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" + role: Union[str, "_models.MessageRole"] = rest_field() + """The role associated with the agent thread message. Required. Known values are: \"user\" and + \"assistant\".""" + content: List["_models.MessageContent"] = rest_field() + """The list of content items associated with the agent thread message. Required.""" + assistant_id: str = rest_field() + """If applicable, the ID of the agent that authored this message. Required.""" + run_id: str = rest_field() + """If applicable, the ID of the run associated with the authoring of this message. Required.""" + attachments: List["_models.MessageAttachment"] = rest_field() + """A list of files attached to the message, and the tools they were added to. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. 
Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + thread_id: str, + status: Union[str, "_models.MessageStatus"], + incomplete_details: "_models.MessageIncompleteDetails", + completed_at: datetime.datetime, + incomplete_at: datetime.datetime, + role: Union[str, "_models.MessageRole"], + content: List["_models.MessageContent"], + assistant_id: str, + run_id: str, + attachments: List["_models.MessageAttachment"], + metadata: Dict[str, str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["thread.message"] = "thread.message" + + +class ThreadMessageOptions(_model_base.Model): + """A single message within an agent thread, as provided during that thread's creation for its + initial state. + + All required parameters must be populated in order to send to server. + + :ivar role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: "user" and "assistant". + :vartype role: str or ~azure.ai.projects.models.MessageRole + :ivar content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :vartype content: str + :ivar attachments: A list of files attached to the message, and the tools they should be added + to. + :vartype attachments: list[~azure.ai.projects.models.MessageAttachment] + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. + :vartype metadata: dict[str, str] + """ + + role: Union[str, "_models.MessageRole"] = rest_field() + """The role of the entity that is creating the message. Allowed values include: + + + * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases + to represent user-generated messages. + * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Required. Known values are: \"user\" and \"assistant\".""" + content: str = rest_field() + """The textual content of the initial message. Currently, robust input including images and + annotated text may only be provided via + a separate call to the create message API. Required.""" + attachments: Optional[List["_models.MessageAttachment"]] = rest_field() + """A list of files attached to the message, and the tools they should be added to.""" + metadata: Optional[Dict[str, str]] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. 
Keys may be up to 64 + characters in length and values may be up to 512 characters in length.""" + + @overload + def __init__( + self, + *, + role: Union[str, "_models.MessageRole"], + content: str, + attachments: Optional[List["_models.MessageAttachment"]] = None, + metadata: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ThreadRun(_model_base.Model): + """Data representing a single evaluation run of an agent thread. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always 'thread.run'. Required. Default value is + "thread.run". + :vartype object: str + :ivar thread_id: The ID of the thread associated with this run. Required. + :vartype thread_id: str + :ivar assistant_id: The ID of the agent associated with the thread this run was performed + against. Required. + :vartype assistant_id: str + :ivar status: The status of the agent thread run. Required. Known values are: "queued", + "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and + "expired". + :vartype status: str or ~azure.ai.projects.models.RunStatus + :ivar required_action: The details of the action required for the agent thread run to continue. + :vartype required_action: ~azure.ai.projects.models.RequiredAction + :ivar last_error: The last error, if any, encountered by this agent thread run. Required. + :vartype last_error: ~azure.ai.projects.models.RunError + :ivar model: The ID of the model to use. Required. + :vartype model: str + :ivar instructions: The overridden system instructions used for this agent thread run. + Required. + :vartype instructions: str + :ivar tools: The overridden enabled tools used for this agent thread run. Required. + :vartype tools: list[~azure.ai.projects.models.ToolDefinition] + :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. + Required. + :vartype expires_at: ~datetime.datetime + :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. + Required. + :vartype started_at: ~datetime.datetime + :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. + :vartype completed_at: ~datetime.datetime + :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. + Required. + :vartype cancelled_at: ~datetime.datetime + :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. + :vartype failed_at: ~datetime.datetime + :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is + not incomplete. Required. Known values are: "max_completion_tokens" and "max_prompt_tokens". + :vartype incomplete_details: str or ~azure.ai.projects.models.IncompleteRunDetails + :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not + in a terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). Required. 
+ :vartype usage: ~azure.ai.projects.models.RunCompletionUsage
+ :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1.
+ :vartype temperature: float
+ :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1.
+ :vartype top_p: float
+ :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over
+ the course of the run. Required.
+ :vartype max_prompt_tokens: int
+ :ivar max_completion_tokens: The maximum number of completion tokens specified to have been
+ used over the course of the run. Required.
+ :vartype max_completion_tokens: int
+ :ivar truncation_strategy: The strategy to use for dropping messages as the context window
+ moves forward. Required.
+ :vartype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+ :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is
+ one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+ AgentsNamedToolChoice
+ :vartype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+ ~azure.ai.projects.models.AgentsNamedToolChoice
+ :ivar response_format: The response format of the tool calls used in this run. Required. Is one
+ of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+ AgentsApiResponseFormat
+ :vartype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+ or ~azure.ai.projects.models.AgentsApiResponseFormat
+ :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
+ storing additional information about that object in a structured format. Keys may be up to 64
+ characters in length and values may be up to 512 characters in length. Required.
+ :vartype metadata: dict[str, str]
+ :ivar tool_resources: Override the tools the agent can use for this run. This is useful for
+ modifying the behavior on a per-run basis.
+ :vartype tool_resources: ~azure.ai.projects.models.UpdateToolResourcesOptions
+ :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run.
+ :vartype parallel_tool_calls: bool
+ """
+
+ id: str = rest_field()
+ """The identifier, which can be referenced in API endpoints. Required."""
+ object: Literal["thread.run"] = rest_field()
+ """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\"."""
+ thread_id: str = rest_field()
+ """The ID of the thread associated with this run. Required."""
+ assistant_id: str = rest_field()
+ """The ID of the agent associated with the thread this run was performed against. Required."""
+ status: Union[str, "_models.RunStatus"] = rest_field()
+ """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\",
+ \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\"."""
+ required_action: Optional["_models.RequiredAction"] = rest_field()
+ """The details of the action required for the agent thread run to continue."""
+ last_error: "_models.RunError" = rest_field()
+ """The last error, if any, encountered by this agent thread run. Required."""
+ model: str = rest_field()
+ """The ID of the model to use. Required."""
+ instructions: str = rest_field()
+ """The overridden system instructions used for this agent thread run. Required."""
+ tools: List["_models.ToolDefinition"] = rest_field()
+ """The overridden enabled tools used for this agent thread run.
Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this object was created. Required.""" + expires_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item expires. Required.""" + started_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this item was started. Required.""" + completed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this completed. Required.""" + cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" + failed_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp, in seconds, representing when this failed. Required.""" + incomplete_details: Union[str, "_models.IncompleteRunDetails"] = rest_field() + """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required. + Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\".""" + usage: "_models.RunCompletionUsage" = rest_field() + """Usage statistics related to the run. This value will be ``null`` if the run is not in a + terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" + temperature: Optional[float] = rest_field() + """The sampling temperature used for this run. If not set, defaults to 1.""" + top_p: Optional[float] = rest_field() + """The nucleus sampling value used for this run. If not set, defaults to 1.""" + max_prompt_tokens: int = rest_field() + """The maximum number of prompt tokens specified to have been used over the course of the run. + Required.""" + max_completion_tokens: int = rest_field() + """The maximum number of completion tokens specified to have been used over the course of the run. + Required.""" + truncation_strategy: "_models.TruncationObject" = rest_field() + """The strategy to use for dropping messages as the context windows moves forward. Required.""" + tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field() + """Controls whether or not and which tool is called by the model. Required. Is one of the + following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], + AgentsNamedToolChoice""" + response_format: "_types.AgentsApiResponseFormatOption" = rest_field() + """The response format of the tool calls used in this run. Required. Is one of the following + types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field() + """Override the tools the agent can use for this run. 
This is useful for modifying the behavior on
+ a per-run basis."""
+ parallel_tool_calls: Optional[bool] = rest_field(name="parallelToolCalls")
+ """Determines if tools can be executed in parallel within the run."""
+
+ @overload
+ def __init__( # pylint: disable=too-many-locals
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ thread_id: str,
+ assistant_id: str,
+ status: Union[str, "_models.RunStatus"],
+ last_error: "_models.RunError",
+ model: str,
+ instructions: str,
+ tools: List["_models.ToolDefinition"],
+ created_at: datetime.datetime,
+ expires_at: datetime.datetime,
+ started_at: datetime.datetime,
+ completed_at: datetime.datetime,
+ cancelled_at: datetime.datetime,
+ failed_at: datetime.datetime,
+ incomplete_details: Union[str, "_models.IncompleteRunDetails"],
+ usage: "_models.RunCompletionUsage",
+ max_prompt_tokens: int,
+ max_completion_tokens: int,
+ truncation_strategy: "_models.TruncationObject",
+ tool_choice: "_types.AgentsApiToolChoiceOption",
+ response_format: "_types.AgentsApiResponseFormatOption",
+ metadata: Dict[str, str],
+ required_action: Optional["_models.RequiredAction"] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None,
+ parallel_tool_calls: Optional[bool] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["thread.run"] = "thread.run"
+
+
+class ToolOutput(_model_base.Model):
+ """The data provided during a tool outputs submission to resolve pending tool calls and allow the
+ model to continue.
+
+ :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a
+ required action from a run.
+ :vartype tool_call_id: str
+ :ivar output: The output from the tool to be submitted.
+ :vartype output: str
+ """
+
+ tool_call_id: Optional[str] = rest_field()
+ """The ID of the tool call being resolved, as provided in the tool calls of a required action from
+ a run."""
+ output: Optional[str] = rest_field()
+ """The output from the tool to be submitted."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ tool_call_id: Optional[str] = None,
+ output: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
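For orientation, ToolOutput above is the unit a caller sends back when a run pauses with a submit_tool_outputs required action. A minimal sketch of building one, assuming the model is re-exported from azure.ai.projects.models as the docstrings suggest (the ID and payload below are invented for illustration):

from azure.ai.projects.models import ToolOutput

# Resolve a pending call by echoing its ID together with the tool's result
# (hypothetical values; the real ID comes from the run's required_action).
output = ToolOutput(tool_call_id="call_abc123", output='{"temperature_c": 22}')
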
+class ToolResources(_model_base.Model):
+ """A set of resources that are used by the agent's tools. The resources are specific to the type
+ of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
+ ``file_search`` tool requires a list of vector store IDs.
+
+ :ivar code_interpreter: Resources to be used by the ``code_interpreter`` tool consisting of
+ file IDs.
+ :vartype code_interpreter: ~azure.ai.projects.models.CodeInterpreterToolResource
+ :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store
+ IDs.
+ :vartype file_search: ~azure.ai.projects.models.FileSearchToolResource
+ :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of
+ connection IDs.
+ :vartype bing_grounding: ~azure.ai.projects.models.ConnectionListResource
+ :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of
+ connection IDs.
+ :vartype microsoft_fabric: ~azure.ai.projects.models.ConnectionListResource
+ :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection
+ IDs.
+ :vartype share_point: ~azure.ai.projects.models.ConnectionListResource
+ :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index
+ IDs and names.
+ :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource
+ """
+
+ code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field()
+ """Resources to be used by the ``code_interpreter`` tool consisting of file IDs."""
+ file_search: Optional["_models.FileSearchToolResource"] = rest_field()
+ """Resources to be used by the ``file_search`` tool consisting of vector store IDs."""
+ bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
+ """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs."""
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
+ """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs."""
+ share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
+ """Resources to be used by the ``sharepoint`` tool consisting of connection IDs."""
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
+ """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None,
+ file_search: Optional["_models.FileSearchToolResource"] = None,
+ bing_grounding: Optional["_models.ConnectionListResource"] = None,
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
+ share_point: Optional["_models.ConnectionListResource"] = None,
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class TruncationObject(_model_base.Model):
+ """Controls for how a thread will be truncated prior to the run. Use this to control the initial
+ context window of the run.
+
+
+ :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to
+ ``last_messages``\\ , the thread will
+ be truncated to the ``lastMessages`` count most recent messages in the thread. When set to
+ ``auto``\\ , messages in the middle of the thread
+ will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
+ values are: "auto" and "last_messages".
+ :vartype type: str or ~azure.ai.projects.models.TruncationStrategy
+ :ivar last_messages: The number of most recent messages from the thread when constructing the
+ context for the run.
+ :vartype last_messages: int
+ """
+
+ type: Union[str, "_models.TruncationStrategy"] = rest_field()
+ """The truncation strategy to use for the thread. The default is ``auto``. If set to
+ ``last_messages``\ , the thread will
+ be truncated to the ``lastMessages`` count most recent messages in the thread.
When set to
+ ``auto``\ , messages in the middle of the thread
+ will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
+ values are: \"auto\" and \"last_messages\"."""
+ last_messages: Optional[int] = rest_field()
+ """The number of most recent messages from the thread when constructing the context for the run."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ type: Union[str, "_models.TruncationStrategy"],
+ last_messages: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class UpdateCodeInterpreterToolResourceOptions(_model_base.Model):
+ """Request object to update ``code_interpreter`` tool resources.
+
+ :ivar file_ids: A list of file IDs to override the current list of the agent.
+ :vartype file_ids: list[str]
+ """
+
+ file_ids: Optional[List[str]] = rest_field()
+ """A list of file IDs to override the current list of the agent."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ file_ids: Optional[List[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class UpdateFileSearchToolResourceOptions(_model_base.Model):
+ """Request object to update ``file_search`` tool resources.
+
+ :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent.
+ :vartype vector_store_ids: list[str]
+ """
+
+ vector_store_ids: Optional[List[str]] = rest_field()
+ """A list of vector store IDs to override the current list of the agent."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ vector_store_ids: Optional[List[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
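As a quick illustration of how the two update-option models above are meant to be combined, here is a sketch feeding them into the aggregate UpdateToolResourcesOptions defined next. The IDs are placeholders, and the import path assumes these models are re-exported from azure.ai.projects.models:

from azure.ai.projects.models import (
    UpdateCodeInterpreterToolResourceOptions,
    UpdateFileSearchToolResourceOptions,
    UpdateToolResourcesOptions,
)

# Override, for a single run, the files visible to code_interpreter and the
# vector store backing file_search (placeholder IDs).
overrides = UpdateToolResourcesOptions(
    code_interpreter=UpdateCodeInterpreterToolResourceOptions(file_ids=["assistant-file-001"]),
    file_search=UpdateFileSearchToolResourceOptions(vector_store_ids=["vs-001"]),
)
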
+class UpdateToolResourcesOptions(_model_base.Model):
+ """Request object. A set of resources that are used by the agent's tools. The resources are
+ specific to the type of tool.
+ For example, the ``code_interpreter`` tool requires a list of file IDs, while the
+ ``file_search`` tool requires a list of
+ vector store IDs.
+
+ :ivar code_interpreter: Overrides the list of file IDs made available to the
+ ``code_interpreter`` tool. There can be a maximum of 20 files
+ associated with the tool.
+ :vartype code_interpreter: ~azure.ai.projects.models.UpdateCodeInterpreterToolResourceOptions
+ :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of
+ 1 vector store attached to the agent.
+ :vartype file_search: ~azure.ai.projects.models.UpdateFileSearchToolResourceOptions
+ :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding``
+ tool consisting of connection IDs.
+ :vartype bing_grounding: ~azure.ai.projects.models.ConnectionListResource
+ :ivar microsoft_fabric: Overrides the list of connections to be used by the
+ ``microsoft_fabric`` tool consisting of connection IDs.
+ :vartype microsoft_fabric: ~azure.ai.projects.models.ConnectionListResource
+ :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool
+ consisting of connection IDs.
+ :vartype share_point: ~azure.ai.projects.models.ConnectionListResource
+ :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool
+ consisting of index IDs and names.
+ :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource
+ """
+
+ code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field()
+ """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a
+ maximum of 20 files
+ associated with the tool."""
+ file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field()
+ """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store
+ attached to the agent."""
+ bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
+ """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of
+ connection IDs."""
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
+ """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of
+ connection IDs."""
+ share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
+ """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of
+ connection IDs."""
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
+ """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and
+ names."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None,
+ file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None,
+ bing_grounding: Optional["_models.ConnectionListResource"] = None,
+ microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
+ share_point: Optional["_models.ConnectionListResource"] = None,
+ azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStore(_model_base.Model):
+ """A vector store is a collection of processed files that can be used by the ``file_search`` tool.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The identifier, which can be referenced in API endpoints. Required.
+ :vartype id: str
+ :ivar object: The object type, which is always ``vector_store``. Required. Default value is
+ "vector_store".
+ :vartype object: str
+ :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created.
+ Required.
+ :vartype created_at: ~datetime.datetime
+ :ivar name: The name of the vector store. Required.
+ :vartype name: str
+ :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required.
+ :vartype usage_bytes: int
+ :ivar file_counts: Files count grouped by status processed or being processed by this vector
+ store. Required.
+ :vartype file_counts: ~azure.ai.projects.models.VectorStoreFileCount + :ivar status: The status of the vector store, which can be either ``expired``\\ , + ``in_progress``\\ , or ``completed``. A status of ``completed`` indicates that the vector store + is ready for use. Required. Known values are: "expired", "in_progress", and "completed". + :vartype status: str or ~azure.ai.projects.models.VectorStoreStatus + :ivar expires_after: Details on when this vector store expires. + :vartype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire. + :vartype expires_at: ~datetime.datetime + :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last + active. Required. + :vartype last_active_at: ~datetime.datetime + :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for + storing additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required. + :vartype metadata: dict[str, str] + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store"] = rest_field() + """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store was created. Required.""" + name: str = rest_field() + """The name of the vector store. Required.""" + usage_bytes: int = rest_field() + """The total number of bytes used by the files in the vector store. Required.""" + file_counts: "_models.VectorStoreFileCount" = rest_field() + """Files count grouped by status processed or being processed by this vector store. Required.""" + status: Union[str, "_models.VectorStoreStatus"] = rest_field() + """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or + ``completed``. A status of ``completed`` indicates that the vector store is ready for use. + Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() + """Details on when this vector store expires.""" + expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store will expire.""" + last_active_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store was last active. Required.""" + metadata: Dict[str, str] = rest_field() + """A set of up to 16 key/value pairs that can be attached to an object, used for storing + additional information about that object in a structured format. Keys may be up to 64 + characters in length and values may be up to 512 characters in length. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + name: str, + usage_bytes: int, + file_counts: "_models.VectorStoreFileCount", + status: Union[str, "_models.VectorStoreStatus"], + last_active_at: datetime.datetime, + metadata: Dict[str, str], + expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, + expires_at: Optional[datetime.datetime] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store"] = "vector_store" + + +class VectorStoreChunkingStrategyRequest(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest + + All required parameters must be populated in order to send to server. + + :ivar type: The object type. Required. Known values are: "auto" and "static". + :vartype type: str or ~azure.ai.projects.models.VectorStoreChunkingStrategyRequestType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"auto\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): + """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and + chunk_overlap_tokens of 400. + + All required parameters must be populated in order to send to server. + + :ivar type: The object type, which is always 'auto'. Required. + :vartype type: str or ~azure.ai.projects.models.AUTO + """ + + type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'auto'. Required.""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) + + +class VectorStoreChunkingStrategyResponse(_model_base.Model): + """An abstract representation of a vector store chunking strategy configuration. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse + + + :ivar type: The object type. Required. Known values are: "other" and "static". + :vartype type: str or ~azure.ai.projects.models.VectorStoreChunkingStrategyResponseType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + type: str = rest_discriminator(name="type") + """The object type. Required. Known values are: \"other\" and \"static\".""" + + @overload + def __init__( + self, + *, + type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"):
+ """This is returned when the chunking strategy is unknown. Typically, this is because the file was
+ indexed before the chunking_strategy concept was introduced in the API.
+
+
+ :ivar type: The object type, which is always 'other'. Required.
+ :vartype type: str or ~azure.ai.projects.models.OTHER
+ """
+
+ type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'other'. Required."""
+
+ @overload
+ def __init__(
+ self,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs)
+
+
+class VectorStoreDeletionStatus(_model_base.Model):
+ """Response object for deleting a vector store.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The ID of the resource specified for deletion. Required.
+ :vartype id: str
+ :ivar deleted: A value indicating whether deletion was successful. Required.
+ :vartype deleted: bool
+ :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value
+ is "vector_store.deleted".
+ :vartype object: str
+ """
+
+ id: str = rest_field()
+ """The ID of the resource specified for deletion. Required."""
+ deleted: bool = rest_field()
+ """A value indicating whether deletion was successful. Required."""
+ object: Literal["vector_store.deleted"] = rest_field()
+ """The object type, which is always 'vector_store.deleted'. Required. Default value is
+ \"vector_store.deleted\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ deleted: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.deleted"] = "vector_store.deleted"
+
+
+class VectorStoreExpirationPolicy(_model_base.Model):
+ """The expiration policy for a vector store.
+
+
+ :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors:
+ ``last_active_at``. Required. "last_active_at"
+ :vartype anchor: str or ~azure.ai.projects.models.VectorStoreExpirationPolicyAnchor
+ :ivar days: The number of days after the anchor time that the vector store will expire. Required.
+ :vartype days: int
+ """
+
+ anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field()
+ """Anchor timestamp after which the expiration policy applies. Supported anchors:
+ ``last_active_at``. Required. \"last_active_at\""""
+ days: int = rest_field()
+ """The number of days after the anchor time that the vector store will expire. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"],
+ days: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreFile(_model_base.Model): + """Description of a file attached to a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file``. Required. Default value + is "vector_store.file". + :vartype object: str + :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from + the original file + size. Required. + :vartype usage_bytes: int + :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. + Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store file, which can be either ``in_progress``\\ , + ``completed``\\ , ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the + vector store file is ready for use. Required. Known values are: "in_progress", "completed", + "failed", and "cancelled". + :vartype status: str or ~azure.ai.projects.models.VectorStoreFileStatus + :ivar last_error: The last error associated with this vector store file. Will be ``null`` if + there are no errors. Required. + :vartype last_error: ~azure.ai.projects.models.VectorStoreFileError + :ivar chunking_strategy: The strategy used to chunk the file. Required. + :vartype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyResponse + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.file"] = rest_field() + """The object type, which is always ``vector_store.file``. Required. Default value is + \"vector_store.file\".""" + usage_bytes: int = rest_field() + """The total vector store usage in bytes. Note that this may be different from the original file + size. Required.""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" + vector_store_id: str = rest_field() + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() + """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , + ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file + is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and + \"cancelled\".""" + last_error: "_models.VectorStoreFileError" = rest_field() + """The last error associated with this vector store file. Will be ``null`` if there are no errors. + Required.""" + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() + """The strategy used to chunk the file. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + usage_bytes: int, + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileStatus"], + last_error: "_models.VectorStoreFileError", + chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.file"] = "vector_store.file" + + +class VectorStoreFileBatch(_model_base.Model): + """A batch of files attached to a vector store. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar id: The identifier, which can be referenced in API endpoints. Required. + :vartype id: str + :ivar object: The object type, which is always ``vector_store.file_batch``. Required. Default + value is "vector_store.files_batch". + :vartype object: str + :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was + created. Required. + :vartype created_at: ~datetime.datetime + :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. + :vartype vector_store_id: str + :ivar status: The status of the vector store files batch, which can be either ``in_progress``\\ + , ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: "in_progress", + "completed", "cancelled", and "failed". + :vartype status: str or ~azure.ai.projects.models.VectorStoreFileBatchStatus + :ivar file_counts: Files count grouped by status processed or being processed by this vector + store. Required. + :vartype file_counts: ~azure.ai.projects.models.VectorStoreFileCount + """ + + id: str = rest_field() + """The identifier, which can be referenced in API endpoints. Required.""" + object: Literal["vector_store.files_batch"] = rest_field() + """The object type, which is always ``vector_store.file_batch``. Required. Default value is + \"vector_store.files_batch\".""" + created_at: datetime.datetime = rest_field(format="unix-timestamp") + """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" + vector_store_id: str = rest_field() + """The ID of the vector store that the file is attached to. Required.""" + status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() + """The status of the vector store files batch, which can be either ``in_progress``\ , + ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + \"completed\", \"cancelled\", and \"failed\".""" + file_counts: "_models.VectorStoreFileCount" = rest_field() + """Files count grouped by status processed or being processed by this vector store. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + created_at: datetime.datetime, + vector_store_id: str, + status: Union[str, "_models.VectorStoreFileBatchStatus"], + file_counts: "_models.VectorStoreFileCount", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch" + + +class VectorStoreFileCount(_model_base.Model): + """Counts of files processed or being processed by this vector store grouped by status. + + + :ivar in_progress: The number of files that are currently being processed. Required. 
+ :vartype in_progress: int
+ :ivar completed: The number of files that have been successfully processed. Required.
+ :vartype completed: int
+ :ivar failed: The number of files that have failed to process. Required.
+ :vartype failed: int
+ :ivar cancelled: The number of files that were cancelled. Required.
+ :vartype cancelled: int
+ :ivar total: The total number of files. Required.
+ :vartype total: int
+ """
+
+ in_progress: int = rest_field()
+ """The number of files that are currently being processed. Required."""
+ completed: int = rest_field()
+ """The number of files that have been successfully processed. Required."""
+ failed: int = rest_field()
+ """The number of files that have failed to process. Required."""
+ cancelled: int = rest_field()
+ """The number of files that were cancelled. Required."""
+ total: int = rest_field()
+ """The total number of files. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ in_progress: int,
+ completed: int,
+ failed: int,
+ cancelled: int,
+ total: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreFileDeletionStatus(_model_base.Model):
+ """Response object for deleting a vector store file relationship.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+ :ivar id: The ID of the resource specified for deletion. Required.
+ :vartype id: str
+ :ivar deleted: A value indicating whether deletion was successful. Required.
+ :vartype deleted: bool
+ :ivar object: The object type, which is always 'vector_store.file.deleted'. Required. Default value
+ is "vector_store.file.deleted".
+ :vartype object: str
+ """
+
+ id: str = rest_field()
+ """The ID of the resource specified for deletion. Required."""
+ deleted: bool = rest_field()
+ """A value indicating whether deletion was successful. Required."""
+ object: Literal["vector_store.file.deleted"] = rest_field()
+ """The object type, which is always 'vector_store.file.deleted'. Required. Default value is
+ \"vector_store.file.deleted\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ id: str, # pylint: disable=redefined-builtin
+ deleted: bool,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted"
+
+
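Like every model in this file, the deletion-status models also accept a single raw-JSON mapping, which is how service responses are typically hydrated. A small sketch (the payload literal is invented, and the import path assumes the model is re-exported from azure.ai.projects.models):

from azure.ai.projects.models import VectorStoreFileDeletionStatus

# Hydrate the model directly from a raw service payload via the mapping overload.
payload = {"id": "file-abc", "deleted": True, "object": "vector_store.file.deleted"}
status = VectorStoreFileDeletionStatus(payload)
assert status.deleted and status.id == "file-abc"
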
+class VectorStoreFileError(_model_base.Model):
+ """Details on the error that may have occurred while processing a file for this vector store.
+
+
+ :ivar code: The error code. Required. Known values are:
+ "internal_error", "file_not_found", "parsing_error", and "unhandled_mime_type".
+ :vartype code: str or ~azure.ai.projects.models.VectorStoreFileErrorCode
+ :ivar message: A human-readable description of the error. Required.
+ :vartype message: str
+ """
+
+ code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field()
+ """The error code. Required. Known values are:
+ \"internal_error\", \"file_not_found\", \"parsing_error\", and \"unhandled_mime_type\"."""
+ message: str = rest_field()
+ """A human-readable description of the error. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ code: Union[str, "_models.VectorStoreFileErrorCode"],
+ message: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyOptions(_model_base.Model):
+ """Options to configure a vector store static chunking strategy.
+
+
+ :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is
+ 800. The minimum value is 100 and the maximum value is 4096. Required.
+ :vartype max_chunk_size_tokens: int
+ :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value
+ is 400.
+ Note that the overlap must not exceed half of max_chunk_size_tokens. Required.
+ :vartype chunk_overlap_tokens: int
+ """
+
+ max_chunk_size_tokens: int = rest_field()
+ """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100
+ and the maximum value is 4096. Required."""
+ chunk_overlap_tokens: int = rest_field()
+ """The number of tokens that overlap between chunks. The default value is 400.
+ Note that the overlap must not exceed half of max_chunk_size_tokens. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ max_chunk_size_tokens: int,
+ chunk_overlap_tokens: int,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"):
+ """A statically configured chunking strategy.
+
+ All required parameters must be populated in order to send to server.
+
+ :ivar type: The object type, which is always 'static'. Required.
+ :vartype type: str or ~azure.ai.projects.models.STATIC
+ :ivar static: The options for the static chunking strategy. Required.
+ :vartype static: ~azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions
+ """
+
+ type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'static'. Required."""
+ static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field()
+ """The options for the static chunking strategy. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ static: "_models.VectorStoreStaticChunkingStrategyOptions",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs)
+
+
+class VectorStoreStaticChunkingStrategyResponse(
+ VectorStoreChunkingStrategyResponse, discriminator="static"
+): # pylint: disable=name-too-long
+ """A statically configured chunking strategy.
+
+
+ :ivar type: The object type, which is always 'static'. Required.
+ :vartype type: str or ~azure.ai.projects.models.STATIC
+ :ivar static: The options for the static chunking strategy. Required.
+ :vartype static: ~azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions
+ """
+
+ type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore
+ """The object type, which is always 'static'. Required."""
+ static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field()
+ """The options for the static chunking strategy. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ static: "_models.VectorStoreStaticChunkingStrategyOptions",
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs)
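Before moving on to the handwritten customizations, a brief sketch of how the request-side chunking models compose; the token values simply restate the defaults and limits documented in the docstrings above (the import path again assumes re-export from azure.ai.projects.models):

from azure.ai.projects.models import (
    VectorStoreStaticChunkingStrategyOptions,
    VectorStoreStaticChunkingStrategyRequest,
)

# A static strategy restating the documented defaults: 800-token chunks with a
# 400-token overlap (the overlap must not exceed half of max_chunk_size_tokens).
strategy = VectorStoreStaticChunkingStrategyRequest(
    static=VectorStoreStaticChunkingStrategyOptions(
        max_chunk_size_tokens=800,
        chunk_overlap_tokens=400,
    )
)
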
+class VectorStoreStaticChunkingStrategyResponse(
+    VectorStoreChunkingStrategyResponse, discriminator="static"
+):  # pylint: disable=name-too-long
+    """A statically configured chunking strategy.
+
+
+    :ivar type: The object type, which is always 'static'. Required.
+    :vartype type: str or ~azure.ai.projects.models.STATIC
+    :ivar static: The options for the static chunking strategy. Required.
+    :vartype static: ~azure.ai.projects.models.VectorStoreStaticChunkingStrategyOptions
+    """
+
+    type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type")  # type: ignore
+    """The object type, which is always 'static'. Required."""
+    static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field()
+    """The options for the static chunking strategy. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        static: "_models.VectorStoreStaticChunkingStrategyOptions",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs)
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
new file mode 100644
index 000000000000..9925347721e1
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -0,0 +1,997 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import datetime
+import inspect
+import json
+import logging
+import base64
+import asyncio
+
+from azure.core.credentials import TokenCredential, AccessToken
+
+from ._enums import AgentStreamEvent, ConnectionType
+from ._models import (
+    ConnectionsListSecretsResponse,
+    MessageDeltaChunk,
+    SubmitToolOutputsAction,
+    ThreadRun,
+    RunStep,
+    ThreadMessage,
+    RunStepDeltaChunk,
+    FunctionToolDefinition,
+    FunctionDefinition,
+    ToolDefinition,
+    ToolResources,
+    FileSearchToolDefinition,
+    FileSearchToolResource,
+    CodeInterpreterToolDefinition,
+    CodeInterpreterToolResource,
+    RequiredFunctionToolCall,
+)
+
+from abc import ABC, abstractmethod
+from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin
+
+logger = logging.getLogger(__name__)
+
+
+def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Remove any parameters not present in the class's public fields; return a shallow copy of the dictionary.
+
+    **Note:** Classes inherited from azure.ai.projects._model_base.Model validate that supplied
+    parameters are present in their list of attributes and raise an error if they are not. This
+    check is not relevant for classes that do not inherit from that base.
+    :param model_class: The class of model to be used.
+    :param parameters: The parsed dictionary with parameters.
+    :return: The dictionary with all invalid parameters removed.
+    """
+    new_params = {}
+    valid_parameters = set(
+        filter(
+            lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys()
+        )
+    )
+    for k in filter(lambda x: x in valid_parameters, parameters.keys()):
+        new_params[k] = parameters[k]
+    return new_params
+
+
+def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any:
+    """
+    Instantiate the model class with the set of parameters received from the server.
+
+    :param model_class: The class of model to be used.
+    :param parameters: The parsed dictionary with parameters.
+    :return: An instance of model_class if parameters is a dictionary; otherwise the parameters themselves.
+    """
+    if not isinstance(parameters, dict):
+        return parameters
+    return model_class(**_filter_parameters(model_class, parameters))
+
+
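+# Illustrative usage sketch (not part of the module; 'parsed' is a stand-in for a
+# payload parsed from a server event): unknown keys are filtered out before the
+# model is constructed, so extra fields sent by the service do not raise.
+#
+#     parsed = {"id": "run_abc", "unknown_field": 1}
+#     run = _safe_instantiate(ThreadRun, parsed)  # "unknown_field" is dropped
+
+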
+class ConnectionProperties:
+    """The properties of a single connection.
+
+    :ivar id: A unique identifier for the connection.
+    :vartype id: str
+    :ivar name: The friendly name of the connection.
+    :vartype name: str
+    :ivar authentication_type: The authentication type used by the connection.
+    :vartype authentication_type: ~azure.ai.projects.models._models.AuthenticationType
+    :ivar connection_type: The connection type.
+    :vartype connection_type: ~azure.ai.projects.models._models.ConnectionType
+    :ivar endpoint_url: The endpoint URL associated with this connection.
+    :vartype endpoint_url: str
+    :ivar key: The api-key to be used when accessing the connection.
+    :vartype key: str
+    :ivar token_credential: The TokenCredential to be used when accessing the connection.
+    :vartype token_credential: ~azure.core.credentials.TokenCredential
+    """
+
+    def __init__(
+        self, *, connection: ConnectionsListSecretsResponse, token_credential: Optional[TokenCredential] = None
+    ) -> None:
+        self.id = connection.id
+        self.name = connection.name
+        self.authentication_type = connection.properties.auth_type
+        self.connection_type = connection.properties.category
+        self.endpoint_url = (
+            connection.properties.target[:-1]
+            if connection.properties.target.endswith("/")
+            else connection.properties.target
+        )
+        self.key: Optional[str] = None
+        if hasattr(connection.properties, "credentials"):
+            if hasattr(connection.properties.credentials, "key"):
+                self.key = connection.properties.credentials.key
+        self.token_credential = token_credential
+
+    def to_evaluator_model_config(self, deployment_name: str, api_version: str) -> Dict[str, str]:
+        connection_type = self.connection_type.value
+        if connection_type == ConnectionType.AZURE_OPEN_AI:
+            connection_type = "azure_openai"
+
+        if self.authentication_type == "ApiKey":
+            model_config = {
+                "azure_deployment": deployment_name,
+                "azure_endpoint": self.endpoint_url,
+                "type": connection_type,
+                "api_version": api_version,
+                "api_key": f"{self.id}/credentials/key",
+            }
+        else:
+            model_config = {
+                "azure_deployment": deployment_name,
+                "azure_endpoint": self.endpoint_url,
+                "type": connection_type,
+                "api_version": api_version,
+            }
+        return model_config
+
+    def __str__(self):
+        out = "{\n"
+        out += f' "name": "{self.name}",\n'
+        out += f' "id": "{self.id}",\n'
+        out += f' "authentication_type": "{self.authentication_type}",\n'
+        out += f' "connection_type": "{self.connection_type}",\n'
+        out += f' "endpoint_url": "{self.endpoint_url}",\n'
+        if self.key:
+            out += f' "key": "{self.key}",\n'
+        else:
+            out += ' "key": null,\n'
+        if self.token_credential:
+            access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default")
+            out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n'
+        else:
+            out += ' "token_credential": null\n'
+        out += "}\n"
+        return out
+
+
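+# Illustrative usage sketch (not part of the module; assumes 'connection' is a
+# ConnectionsListSecretsResponse for an Azure OpenAI connection, retrieved with
+# credentials included, and that the deployment name and API version are valid
+# for your project):
+#
+#     properties = ConnectionProperties(connection=connection)
+#     model_config = properties.to_evaluator_model_config(
+#         deployment_name="gpt-4o", api_version="2024-06-01"
+#     )
+#     # model_config["type"] == "azure_openai"; with ApiKey authentication it also
+#     # carries an "api_key" reference of the form "<connection id>/credentials/key".
+
+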
+class SASTokenCredential(TokenCredential):
+    def __init__(
+        self,
+        *,
+        sas_token: str,
+        credential: TokenCredential,
+        subscription_id: str,
+        resource_group_name: str,
+        project_name: str,
+        connection_name: str,
+    ):
+        self._sas_token = sas_token
+        self._credential = credential
+        self._subscription_id = subscription_id
+        self._resource_group_name = resource_group_name
+        self._project_name = project_name
+        self._connection_name = connection_name
+        self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token)
+        logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on)
+
+    @classmethod
+    def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime:
+        payload = jwt_token.split(".")[1]
+        padded_payload = payload + "=" * (4 - len(payload) % 4)  # Add padding if necessary
+        decoded_bytes = base64.urlsafe_b64decode(padded_payload)
+        decoded_str = decoded_bytes.decode("utf-8")
+        decoded_payload = json.loads(decoded_str)
+        expiration_date = decoded_payload.get("exp")
+        return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc)
+
+    def _refresh_token(self) -> None:
+        logger.debug("[SASTokenCredential._refresh_token] Enter")
+        from azure.ai.projects import AIProjectClient
+
+        project_client = AIProjectClient(
+            credential=self._credential,
+            endpoint="not-needed",  # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK.
+            subscription_id=self._subscription_id,
+            resource_group_name=self._resource_group_name,
+            project_name=self._project_name,
+        )
+
+        connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True)
+
+        self._sas_token = connection.properties.credentials.sas
+        self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token)
+        logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on)
+
+    def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken:
+        # The SAS token is self-contained; requested scopes and options are ignored.
+        logger.debug("[SASTokenCredential.get_token] Enter")
+        if self._expires_on < datetime.datetime.now(datetime.timezone.utc):
+            self._refresh_token()
+        return AccessToken(self._sas_token, self._expires_on.timestamp())
+
+
+# Define type_map to translate Python type annotations to JSON Schema types
+type_map = {
+    "str": "string",
+    "int": "integer",
+    "float": "number",
+    "bool": "boolean",
+    "bytes": "string",  # Typically encoded as base64-encoded strings in JSON
+    "NoneType": "null",
+    "datetime": "string",  # Use format "date-time"
+    "date": "string",  # Use format "date"
+    "UUID": "string",  # Use format "uuid"
+}
+
+
+def _map_type(annotation) -> str:
+
+    if annotation == inspect.Parameter.empty:
+        return "string"  # Default type if annotation is missing
+
+    origin = get_origin(annotation)
+
+    if origin in {list, List}:
+        return "array"
+    elif origin in {dict, Dict}:
+        return "object"
+    elif hasattr(annotation, "__name__"):
+        return type_map.get(annotation.__name__, "string")
+    elif isinstance(annotation, type):
+        return type_map.get(annotation.__name__, "string")
+
+    return "string"  # Fallback to "string" if type is unrecognized
+
+
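+# Illustrative usage sketch (not part of the module): given a user function such as
+#
+#     def fetch_weather(location: str, days: int) -> str: ...
+#
+# the parameter annotations map to JSON Schema type names:
+#
+#     sig = inspect.signature(fetch_weather)
+#     [_map_type(p.annotation) for p in sig.parameters.values()]  # -> ["string", "integer"]
+
+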
+ """ + pass + + +class FunctionTool(Tool): + """ + A tool that executes user-defined functions. + """ + + def __init__(self, functions: Dict[str, Any]): + """ + Initialize FunctionTool with a dictionary of functions. + + :param functions: A dictionary where keys are function names and values are the function objects. + """ + self._functions = functions + self._definitions = self._build_function_definitions(functions) + + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: + specs = [] + for name, func in functions.items(): + sig = inspect.signature(func) + params = sig.parameters + docstring = inspect.getdoc(func) + description = docstring.split("\n")[0] if docstring else "No description" + + properties = {} + for param_name, param in params.items(): + param_type = _map_type(param.annotation) + param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None + properties[param_name] = {"type": param_type, "description": param_description} + + function_def = FunctionDefinition( + name=name, + description=description, + parameters={"type": "object", "properties": properties, "required": list(params.keys())}, + ) + tool_def = FunctionToolDefinition(function=function_def) + specs.append(tool_def) + return specs + + def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: + function_name = tool_call.function.name + arguments = tool_call.function.arguments + + if function_name not in self._functions: + logging.error(f"Function '{function_name}' not found.") + raise ValueError(f"Function '{function_name}' not found.") + + function = self._functions[function_name] + + try: + parsed_arguments = json.loads(arguments) + except json.JSONDecodeError as e: + logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") + raise ValueError(f"Invalid JSON arguments: {e}") from e + + if not isinstance(parsed_arguments, dict): + logging.error(f"Arguments must be a JSON object for function '{function_name}'.") + raise TypeError("Arguments must be a JSON object.") + + return function, parsed_arguments + + def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + function, parsed_arguments = self._get_func_and_args(tool_call) + + try: + return function(**parsed_arguments) if parsed_arguments else function() + except TypeError as e: + logging.error(f"Error executing function '{tool_call.function.name}': {e}") + raise + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the function definitions. + + :return: A list of function definitions. + """ + return self._definitions + + @property + def resources(self) -> ToolResources: + """ + Get the tool resources for the agent. + + :return: An empty ToolResources as FunctionTool doesn't have specific resources. + """ + return ToolResources() + + +class AsyncFunctionTool(FunctionTool): + + async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + function, parsed_arguments = self._get_func_and_args(tool_call) + + try: + if inspect.iscoroutinefunction(function): + return await function(**parsed_arguments) if parsed_arguments else await function() + else: + return function(**parsed_arguments) if parsed_arguments else function() + except TypeError as e: + logging.error(f"Error executing function '{tool_call.function.name}': {e}") + raise + + +class FileSearchTool(Tool): + """ + A tool that searches for uploaded file information from the created vector stores. 
+ """ + + def __init__(self, vector_store_ids: List[str] = []): + self.vector_store_ids = vector_store_ids + + def add_vector_store(self, store_id: str): + """ + Add a vector store ID to the list of vector stores to search for files. + """ + self.vector_store_ids.append(store_id) + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the file search tool definitions. + """ + return [FileSearchToolDefinition()] + + @property + def resources(self) -> ToolResources: + """ + Get the file search resources. + """ + return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids)) + + def execute(self, tool_call: Any) -> Any: + pass + + +class CodeInterpreterTool(Tool): + """ + A tool that interprets code files uploaded to the agent. + """ + + def __init__(self): + self.file_ids = [] + + def add_file(self, file_id: str): + """ + Add a file ID to the list of files to interpret. + + :param file_id: The ID of the file to interpret. + """ + self.file_ids.append(file_id) + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the code interpreter tool definitions. + """ + return [CodeInterpreterToolDefinition()] + + @property + def resources(self) -> ToolResources: + """ + Get the code interpreter resources. + """ + return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids)) + + def execute(self, tool_call: Any) -> Any: + pass + + +class ToolSet: + """ + A collection of tools that can be used by an agent. + """ + + def __init__(self): + self._tools: List[Tool] = [] + + def validate_tool_type(self, tool_type: Type[Tool]) -> None: + """ + Validate the type of the tool. + + :param tool_type: The type of the tool to validate. + :raises ValueError: If the tool type is not a subclass of Tool. + """ + if isinstance(tool_type, AsyncFunctionTool): + raise ValueError( + "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." + ) + + def add(self, tool: Tool): + """ + Add a tool to the tool set. + + :param tool: The tool to add. + :raises ValueError: If a tool of the same type already exists. + """ + self.validate_tool_type(type(tool)) + + if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): + raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") + self._tools.append(tool) + + def remove(self, tool_type: Type[Tool]) -> None: + """ + Remove a tool of the specified type from the tool set. + + :param tool_type: The type of tool to remove. + :raises ValueError: If a tool of the specified type is not found. + """ + for i, tool in enumerate(self._tools): + if isinstance(tool, tool_type): + del self._tools[i] + logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.") + return + raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the definitions for all tools in the tool set. + """ + tools = [] + for tool in self._tools: + tools.extend(tool.definitions) + return tools + + @property + def resources(self) -> ToolResources: + """ + Get the resources for all tools in the tool set. 
+ """ + tool_resources = {} + for tool in self._tools: + resources = tool.resources + for key, value in resources.items(): + if key in tool_resources: + if isinstance(tool_resources[key], dict) and isinstance(value, dict): + tool_resources[key].update(value) + else: + tool_resources[key] = value + return self._create_tool_resources_from_dict(tool_resources) + + def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: + """ + Safely converts a dictionary into a ToolResources instance. + """ + try: + return ToolResources(**resources) + except TypeError as e: + logging.error(f"Error creating ToolResources: {e}") + raise ValueError("Invalid resources for ToolResources.") from e + + def get_definitions_and_resources(self) -> Dict[str, Any]: + """ + Get the definitions and resources for all tools in the tool set. + + :return: A dictionary containing the tool resources and definitions. + """ + return { + "tool_resources": self.resources, + "tools": self.definitions, + } + + def get_tool(self, tool_type: Type[Tool]) -> Tool: + """ + Get a tool of the specified type from the tool set. + + :param tool_type: The type of tool to get. + :return: The tool of the specified type. + :raises ValueError: If a tool of the specified type is not found. + """ + for tool in self._tools: + if isinstance(tool, tool_type): + return tool + raise ValueError(f"Tool of type {tool_type.__name__} not found.") + + def execute_tool_calls(self, tool_calls: List[Any]) -> Any: + """ + Execute a tool of the specified type with the provided tool calls. + + :param tool_calls: A list of tool calls to execute. + :return: The output of the tool operations. + """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(FunctionTool) + output = tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: + logging.error(f"Failed to execute tool call {tool_call}: {e}") + + return tool_outputs + + +class AsyncToolSet(ToolSet): + + def validate_tool_type(self, tool_type: Type[Tool]) -> None: + """ + Validate the type of the tool. + + :param tool_type: The type of the tool to validate. + :raises ValueError: If the tool type is not a subclass of Tool. + """ + if isinstance(tool_type, FunctionTool): + raise ValueError( + "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." + ) + + async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: + """ + Execute a tool of the specified type with the provided tool calls. + + :param tool_calls: A list of tool calls to execute. + :return: The output of the tool operations. 
+ """ + tool_outputs = [] + + for tool_call in tool_calls: + try: + if tool_call.type == "function": + tool = self.get_tool(AsyncFunctionTool) + output = await tool.execute(tool_call) + tool_output = { + "tool_call_id": tool_call.id, + "output": output, + } + tool_outputs.append(tool_output) + except Exception as e: + logging.error(f"Failed to execute tool call {tool_call}: {e}") + + return tool_outputs + + +class AgentEventHandler: + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class AsyncAgentEventHandler: + + async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + """Handle message delta events.""" + pass + + async def on_thread_message(self, message: "ThreadMessage") -> None: + """Handle thread message events.""" + pass + + async def on_thread_run(self, run: "ThreadRun") -> None: + """Handle thread run events.""" + pass + + async def on_run_step(self, step: "RunStep") -> None: + """Handle run step events.""" + pass + + async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + """Handle run step delta events.""" + pass + + async def on_error(self, data: str) -> None: + """Handle error events.""" + pass + + async def on_done(self) -> None: + """Handle the completion of the stream.""" + pass + + async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + """Handle any unhandled event types.""" + pass + + +class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: AsyncIterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], + event_handler: Optional["AsyncAgentEventHandler"] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + result = close_method() + if asyncio.iscoroutine(result): + await result + + def __aiter__(self): + return self + + async def __anext__(self) -> Tuple[str, Any]: + while True: + try: + chunk = await self.response_iterator.__anext__() + self.buffer += chunk.decode("utf-8") + except StopAsyncIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return await self._process_event(event_data_str) + raise StopAsyncIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return await self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type 
= None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = _safe_instantiate(RunStep, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) + else: + event_data_obj = parsed_data + + return event_type, event_data_obj + + async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = self._parse_event_data(event_data_str) + + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + await self.submit_tool_outputs(event_data_obj, self.event_handler) + if self.event_handler: + try: + if isinstance(event_data_obj, MessageDeltaChunk): + await self.event_handler.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + await self.event_handler.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + await self.event_handler.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + await self.event_handler.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + await self.event_handler.on_run_step_delta(event_data_obj) + elif event_type == AgentStreamEvent.ERROR: + await self.event_handler.on_error(event_data_obj) + elif event_type == AgentStreamEvent.DONE: + await self.event_handler.on_done() + self.done = True # Mark the stream as done + else: + await self.event_handler.on_unhandled_event(event_type, event_data_obj) + except Exception as e: + logging.error(f"Error in event handler for event '{event_type}': {e}") + + return event_type, event_data_obj + + async def until_done(self) -> 
None: + """ + Iterates through all events until the stream is marked as done. + """ + try: + async for _ in self: + pass # The EventHandler handles the events + except StopAsyncIteration: + pass + + +class AgentRunStream(Iterator[Tuple[str, Any]]): + def __init__( + self, + response_iterator: Iterator[bytes], + submit_tool_outputs: Callable[[ThreadRun, Optional[AgentEventHandler]], None], + event_handler: Optional[AgentEventHandler] = None, + ): + self.response_iterator = response_iterator + self.event_handler = event_handler + self.done = False + self.buffer = "" + self.submit_tool_outputs = submit_tool_outputs + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + close_method = getattr(self.response_iterator, "close", None) + if callable(close_method): + close_method() + + def __iter__(self): + return self + + def __next__(self) -> Tuple[str, Any]: + if self.done: + raise StopIteration + while True: + try: + chunk = next(self.response_iterator) + self.buffer += chunk.decode("utf-8") + except StopIteration: + if self.buffer: + event_data_str, self.buffer = self.buffer, "" + if event_data_str: + return self._process_event(event_data_str) + raise StopIteration + + while "\n\n" in self.buffer: + event_data_str, self.buffer = self.buffer.split("\n\n", 1) + return self._process_event(event_data_str) + + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + event_lines = event_data_str.strip().split("\n") + event_type = None + event_data = "" + + for line in event_lines: + if line.startswith("event:"): + event_type = line.split(":", 1)[1].strip() + elif line.startswith("data:"): + event_data = line.split(":", 1)[1].strip() + + if not event_type: + raise ValueError("Event type not specified in the event data.") + + try: + parsed_data = json.loads(event_data) + except json.JSONDecodeError: + parsed_data = event_data + + # Workaround for service bug: Rename 'expires_at' to 'expired_at' + if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: + parsed_data["expired_at"] = parsed_data.pop("expires_at") + + # Map to the appropriate class instance + if event_type in { + AgentStreamEvent.THREAD_RUN_CREATED, + AgentStreamEvent.THREAD_RUN_QUEUED, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, + AgentStreamEvent.THREAD_RUN_COMPLETED, + AgentStreamEvent.THREAD_RUN_FAILED, + AgentStreamEvent.THREAD_RUN_CANCELLING, + AgentStreamEvent.THREAD_RUN_CANCELLED, + AgentStreamEvent.THREAD_RUN_EXPIRED, + }: + event_data_obj = _safe_instantiate(ThreadRun, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_RUN_STEP_CREATED, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, + AgentStreamEvent.THREAD_RUN_STEP_FAILED, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + }: + event_data_obj = _safe_instantiate(RunStep, parsed_data) + elif event_type in { + AgentStreamEvent.THREAD_MESSAGE_CREATED, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + }: + event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) + else: + event_data_obj 
= parsed_data + + return event_type, event_data_obj + + def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + event_type, event_data_obj = self._parse_event_data(event_data_str) + + if ( + isinstance(event_data_obj, ThreadRun) + and event_data_obj.status == "requires_action" + and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) + ): + self.submit_tool_outputs(event_data_obj, self.event_handler) + if self.event_handler: + try: + if isinstance(event_data_obj, MessageDeltaChunk): + self.event_handler.on_message_delta(event_data_obj) + elif isinstance(event_data_obj, ThreadMessage): + self.event_handler.on_thread_message(event_data_obj) + elif isinstance(event_data_obj, ThreadRun): + self.event_handler.on_thread_run(event_data_obj) + elif isinstance(event_data_obj, RunStep): + self.event_handler.on_run_step(event_data_obj) + elif isinstance(event_data_obj, RunStepDeltaChunk): + self.event_handler.on_run_step_delta(event_data_obj) + elif event_type == AgentStreamEvent.ERROR: + self.event_handler.on_error(event_data_obj) + elif event_type == AgentStreamEvent.DONE: + self.event_handler.on_done() + self.done = True # Mark the stream as done + else: + self.event_handler.on_unhandled_event(event_type, event_data_obj) + except Exception as e: + logging.error(f"Error in event handler for event '{event_type}': {e}") + + return event_type, event_data_obj + + def until_done(self) -> None: + """ + Iterates through all events until the stream is marked as done. + """ + try: + for _ in self: + pass # The EventHandler handles the events + except StopIteration: + pass + + +__all__: List[str] = [ + "AgentEventHandler", + "AgentRunStream", + "AsyncAgentEventHandler", + "AsyncAgentRunStream", + "AsyncFunctionTool", + "AsyncToolSet", + "CodeInterpreterTool", + "FileSearchTool", + "FunctionTool", + "SASTokenCredential", + "Tool", + "ToolSet", +] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py new file mode 100644 index 000000000000..35cf92df96bc --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wrong-import-position + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ._patch import * # pylint: disable=unused-wildcard-import + +from ._operations import AgentsOperations # type: ignore +from ._operations import ConnectionsOperations # type: ignore +from ._operations import EvaluationsOperations # type: ignore + +from ._patch import __all__ as _patch_all +from ._patch import * +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AgentsOperations", + "ConnectionsOperations", + "EvaluationsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore +_patch_sdk() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py new file mode 100644 index 000000000000..29d195f38724 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -0,0 +1,7396 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import _model_base, models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._vendor import FileType, prepare_multipart_form_data + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore + +if TYPE_CHECKING: + from .. 
import _types +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +_Unset: Any = object() +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_agents_request( + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: 
Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/assistants/{assistantId}" + path_format_arguments = { + "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_agents_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_messages_request( + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if run_id is not None: + _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/messages/{messageId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "messageId": _SERIALIZER.url("message_id", message_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_run_request(thread_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_runs_request( + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + 
accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long + thread_id: str, run_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/cancel" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/runs" + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_run_step_request(thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + "stepId": _SERIALIZER.url("step_id", step_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_run_steps_request( + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/threads/{threadId}/runs/{runId}/steps" + path_format_arguments = { + "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), + "runId": _SERIALIZER.url("run_id", run_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_files_request( + *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if purpose is not None: + _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") + + # 
Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_upload_file_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_file_content_request(file_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/files/{fileId}/content" + path_format_arguments = { + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_stores_request( + *, + limit: Optional[int] = 
None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_modify_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + 
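# _SERIALIZER.url validates the value and URL-encodes it before it is
+ # substituted into the {vectorStoreId} segment of the path template.
+ 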
"vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_files_request( # pylint: disable=name-too-long + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_delete_vector_store_file_request( # pylint: disable=name-too-long + vector_store_id: str, file_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/files/{fileId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "fileId": _SERIALIZER.url("file_id", file_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_create_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", 
vector_store_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_get_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long + vector_store_id: str, batch_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_agents_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" + path_format_arguments = { + "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), + "batchId": 
_SERIALIZER.url("batch_id", batch_id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if filter is not None: + _params["filter"] = _SERIALIZER.query("filter", filter, "str") + if limit is not None: + _params["limit"] = _SERIALIZER.query("limit", limit, "int") + if order is not None: + _params["order"] = _SERIALIZER.query("order", order, "str") + if after is not None: + _params["after"] = _SERIALIZER.query("after", after, "str") + if before is not None: + _params["before"] = _SERIALIZER.query("before", before, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_request( + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if category is not None: + _params["category"] = _SERIALIZER.query("category", category, "str") + if include_all is not None: + _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool") + if target is not None: + _params["target"] = _SERIALIZER.query("target", target, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionName}" + path_format_arguments = { + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/connections/{connectionName}/listsecrets" + path_format_arguments = { + "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), + 
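# listsecrets is an action endpoint, hence the POST verb with the
+ # connection name carried in the URL path.
+ 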
} + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("apiVersion", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs:run" + + # Construct parameters + _params["apiVersion"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/runs/{id}" + path_format_arguments = { + "id": _SERIALIZER.url("id", id, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_get_schedule_request(name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_create_or_replace_schedule_request( # pylint: disable=name-too-long + name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_list_schedule_request( + *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if top is not None: + _params["top"] = _SERIALIZER.query("top", top, "int") + if skip is not 
None: + _params["skip"] = _SERIALIZER.query("skip", skip, "int") + if maxpagesize is not None: + _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_evaluations_delete_schedule_request(name: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/evaluations/schedules/{name}" + path_format_arguments = { + "name": _SERIALIZER.url("name", name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +class AgentsOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`agents` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. 
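+ 
+ A minimal keyword-style call, as a sketch (illustrative only: ``client`` is
+ an assumed ``AIProjectClient`` instance and the model value is a placeholder
+ deployment name, not something this patch defines)::
+ 
+     agent = client.agents.create_agent(
+         model="my-gpt-4o-deployment",  # assumed deployment name
+         name="math-tutor",
+         instructions="You are a helpful math tutor.",
+     )
+ 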
+ :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Creates a new agent. + + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Required. 
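+ (For Azure OpenAI-backed projects this is typically the model deployment
+ name rather than a base model identifier; the accepted values are decided
+ service-side and are not validated by this client.)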
+ :paramtype model: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + if model is _Unset: + raise TypeError("missing required argument: model") + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_agent_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_agents( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfAgent: + """Gets a list of agents that were previously created. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. 
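+ 
+ A cursor-pagination sketch (illustrative only: assumes the returned page
+ exposes the standard ``has_more`` and ``last_id`` fields)::
+ 
+     page = client.agents.list_agents(limit=100)
+     while page.has_more:
+         page = client.agents.list_agents(limit=100, after=page.last_id)
+ 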
+ :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfAgent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) + + _request = build_agents_list_agents_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: + """Retrieves an existing agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + _request = build_agents_get_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
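+ (Keyword arguments left as None are filtered out of the request body before
+ serialization, so unspecified fields are omitted from the payload rather
+ than sent as explicit nulls.)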
+ :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or an IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Agent] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "description": description, + "instructions": instructions, + "metadata": metadata, + "model": model, + "name": name, + "response_format": response_format, + "temperature": temperature, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_agent_request( + assistant_id=assistant_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Agent, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: + """Deletes an agent. + + :param assistant_id: Identifier of the agent. Required. + :type assistant_id: str + :return: AgentDeletionStatus. 
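A sketch of the ``delete_agent`` call documented here (``project_client`` remains the hypothetical client from the earlier sketches; the ``deleted`` field name is an assumption mirroring OpenAI-style deletion statuses):

    status = project_client.agents.delete_agent("asst_abc123")
    # AgentDeletionStatus is documented as MutableMapping-compatible.
    if status["deleted"]:                  # field name assumed
        print("agent removed")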
The AgentDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_agent_request( + assistant_id=assistant_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, + *, + content_type: str = "application/json", + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + messages: Optional[List[_models.ThreadMessageOptions]] = None, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Creates a new thread. Threads contain messages and can be run by agents. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword messages: The initial messages to associate with the new thread. Default value is + None. + :paramtype messages: list[~azure.ai.projects.models.ThreadMessageOptions] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
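A sketch for ``create_thread`` (hypothetical ``project_client``; the message dicts are assumed to serialize in place of ``ThreadMessageOptions`` models, using the same ``role``/``content`` fields as ``create_message``):

    thread = project_client.agents.create_thread(
        messages=[
            {"role": "user", "content": "What is the return policy?"},
        ],
        metadata={"session": "demo-001"},
    )
    thread_id = thread["id"]               # AgentThread is MutableMapping-compatible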
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: + """Gets information about an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: AgentThread. 
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + _request = build_agents_get_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_thread( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, + thread_id: str, + *, + content_type: str = "application/json", + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. 
+ :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_thread( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AgentThread. The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_thread( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_resources: Optional[_models.ToolResources] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.AgentThread: + """Modifies an existing thread. + + :param thread_id: The ID of the thread to modify. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_resources: A set of resources that are made available to the agent's tools in + this thread. The resources are specific to the + type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while + the ``file_search`` tool requires + a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: AgentThread. 
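A sketch for ``update_thread``, attaching ``file_search`` resources after creation. The wire shape of ``tool_resources`` is an assumption based on the docstring's description (the ``file_search`` tool takes a list of vector store IDs), and all IDs are placeholders:

    thread = project_client.agents.update_thread(
        thread_id,
        tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}},  # shape assumed
        metadata={"stage": "triage"},
    )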
The AgentThread is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.AgentThread + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata, "tool_resources": tool_resources} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_thread_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AgentThread, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: + """Deletes an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :return: ThreadDeletionStatus. 
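And the matching teardown via ``delete_thread`` (sketch; the ``deleted`` field name is assumed, as with ``delete_agent`` above):

    status = project_client.agents.delete_thread(thread_id)
    print("deleted:", status["deleted"])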
The ThreadDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_thread_request( + thread_id=thread_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_message( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, + thread_id: str, + *, + role: Union[str, _models.MessageRole], + content: str, + content_type: str = "application/json", + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.projects.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. + :paramtype attachments: list[~azure.ai.projects.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_message( + self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_message( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + role: Union[str, _models.MessageRole] = _Unset, + content: str = _Unset, + attachments: Optional[List[_models.MessageAttachment]] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Creates a new message on a specified thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword role: The role of the entity that is creating the message. Allowed values include: + + + * ``user``\\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. + * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert + messages from the agent into + the conversation. Known values are: "user" and "assistant". Required. + :paramtype role: str or ~azure.ai.projects.models.MessageRole + :keyword content: The textual content of the initial message. Currently, robust input including + images and annotated text may only be provided via + a separate call to the create message API. Required. + :paramtype content: str + :keyword attachments: A list of files attached to the message, and the tools they should be + added to. Default value is None. 
+ :paramtype attachments: list[~azure.ai.projects.models.MessageAttachment] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + if role is _Unset: + raise TypeError("missing required argument: role") + if content is _Unset: + raise TypeError("missing required argument: content") + body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_message_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadMessage: + """Gets a list of messages that exist on a 
thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible + with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_list_messages_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: 
ignore + + @distributed_trace + def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: + """Gets an existing message from an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + _request = build_agents_get_message_request( + thread_id=thread_id, + message_id=message_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_message( + self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, + thread_id: str, + message_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. 
+ :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_message( + self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_message( + self, + thread_id: str, + message_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadMessage: + """Modifies an existing message on an existing thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param message_id: Identifier of the message. Required. + :type message_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadMessage. 
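A sketch tying together ``create_message`` and ``update_message`` with the hypothetical ``project_client``; per the overloads above, only metadata can be modified after a message is created:

    msg = project_client.agents.create_message(
        thread_id,
        role="user",                       # "user" or "assistant" per MessageRole
        content="Summarize the attached report.",
    )
    project_client.agents.update_message(
        thread_id,
        msg["id"],
        metadata={"reviewed": "true"},     # metadata values must be strings
    )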
The ThreadMessage is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessage + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_message_request( + thread_id=thread_id, + message_id=message_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadMessage, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
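A sketch of the JSON-body overload of ``create_run`` documented here; the field names match the method's keyword parameters, and the IDs are placeholders:

    run = project_client.agents.create_run(
        thread_id,
        {
            "assistant_id": "asst_abc123",
            "max_prompt_tokens": 2000,     # best-effort cap across turns of the run
        },
    )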
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls whether a tool is called by the model, and if so, which tool.
+         Is one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode
+         or ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_run(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun.
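The keyword form of ``create_run`` from the overload above, as a sketch; the ``truncation_strategy`` dict shape is an assumption about how ``TruncationObject`` serializes, and the IDs are placeholders:

    run = project_client.agents.create_run(
        thread_id,
        assistant_id="asst_abc123",
        additional_instructions="Answer in one short paragraph.",
        temperature=0.2,
        truncation_strategy={"type": "last_messages", "last_messages": 5},  # shape assumed
    )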
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_run( + self, + thread_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
+         the number of prompt tokens specified,
+         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
+         value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls whether a tool is called by the model, and if so, which tool.
+         Is one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode
+         or ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun.
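Finally, a sketch of a run lifecycle built from the operations in this file: create a run, poll ``get_run`` until it leaves a non-terminal state, then page through history. The non-terminal status names and the ``data`` field on the pageable lists are assumptions mirroring the OpenAI-style run and list objects:

    import time

    run = project_client.agents.create_run(thread_id, assistant_id="asst_abc123")
    while run["status"] in ("queued", "in_progress"):          # status names assumed
        time.sleep(1)
        run = project_client.agents.get_run(thread_id, run["id"])

    for past_run in project_client.agents.list_runs(thread_id, order="desc", limit=10)["data"]:
        print(past_run["id"], past_run["status"])

    for message in project_client.agents.list_messages(thread_id, run_id=run["id"])["data"]:
        print(message["role"])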
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "additional_instructions": additional_instructions, + "additional_messages": additional_messages, + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_run_request( + thread_id=thread_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_runs( + self, + thread_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfThreadRun: + """Gets a list of runs for a specified thread. + + :param thread_id: Identifier of the thread. Required. 
+ :type thread_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_list_runs_request( + thread_id=thread_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Gets an existing run from an existing thread. + + :param thread_id: Identifier of the thread. Required. 
+ :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_get_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def update_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, + thread_id: str, + run_id: str, + *, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Modifies an existing thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. 
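A sketch of ``update_run`` tagging an existing run with metadata, the one writable field this operation documents; ``agents_client`` and the IDs are placeholders.

run = agents_client.update_run(
    thread_id="thread_abc123",
    run_id="run_abc123",
    metadata={"department": "support", "ticket": "1234"},  # up to 16 key/value pairs
)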
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"metadata": metadata} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_update_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
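The overloads here describe a handshake: a run pauses with status ``requires_action`` and a ``required_action.type`` of ``submit_tool_outputs``, the caller executes the requested tool calls locally, then posts the results back. A sketch of that loop, assuming ``agents_client``, pre-existing ``thread_id``/``run_id`` variables, and a caller-supplied ``dispatch_tool`` helper; the ``required_action`` attribute path follows this package's models.

from azure.ai.projects.models import ToolOutput  # model referenced by the docstrings above

# thread_id, run_id, and dispatch_tool are assumed to be defined by the caller.
run = agents_client.get_run(thread_id=thread_id, run_id=run_id)
if run.status == "requires_action":
    tool_calls = run.required_action.submit_tool_outputs.tool_calls
    outputs = [
        ToolOutput(tool_call_id=call.id, output=dispatch_tool(call))  # run each tool locally
        for call in tool_calls
    ]
    run = agents_client.submit_tool_outputs_to_run(
        thread_id=thread_id, run_id=run_id, tool_outputs=outputs
    )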
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + *, + tool_outputs: List[_models.ToolOutput], + content_type: str = "application/json", + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. + :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def submit_tool_outputs_to_run( + self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def submit_tool_outputs_to_run( + self, + thread_id: str, + run_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + tool_outputs: List[_models.ToolOutput] = _Unset, + stream_parameter: Optional[bool] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool + outputs will have a status of 'requires_action' with a required_action.type of + 'submit_tool_outputs'. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. + :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] + :keyword stream_parameter: If true, returns a stream of events that happen during the Run as + server-sent events, terminating when the run enters a terminal state. Default value is None. 
+ :paramtype stream_parameter: bool + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if tool_outputs is _Unset: + raise TypeError("missing required argument: tool_outputs") + body = {"stream": stream_parameter, "tool_outputs": tool_outputs} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_submit_tool_outputs_to_run_request( + thread_id=thread_id, + run_id=run_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: + """Cancels a run of an in progress thread. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :return: ThreadRun. 
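A sketch of ``cancel_run`` on a run that is still in flight (placeholder IDs); cancellation is asynchronous on the service side, so the returned status may still be transitional.

run = agents_client.cancel_run(thread_id="thread_abc123", run_id="run_abc123")
print(run.status)  # may report a transitional status until cancellation completes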
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + _request = build_agents_cancel_run_request( + thread_id=thread_id, + run_id=run_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_thread_and_run( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, + *, + assistant_id: str, + content_type: str = "application/json", + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether the model calls a tool and, if so, which tool. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun. The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_thread_and_run( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun.
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_thread_and_run( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + assistant_id: str = _Unset, + thread: Optional[_models.AgentThreadCreationOptions] = None, + model: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, + stream_parameter: Optional[bool] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new agent thread and immediately starts a run using that new thread. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword assistant_id: The ID of the agent for which the thread should be created. Required. + :paramtype assistant_id: str + :keyword thread: The details used to create the new thread. If no thread is provided, an empty + one will be created. Default value is None. + :paramtype thread: ~azure.ai.projects.models.AgentThreadCreationOptions + :keyword model: The overridden model that the agent should use to run the thread. Default value + is None. + :paramtype model: str + :keyword instructions: The overridden system instructions the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword tools: The overridden list of enabled tools the agent should use to run the thread. + Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: Override the tools the agent can use for this run. This is useful for + modifying the behavior on a per-run basis. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.UpdateToolResourcesOptions + :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the + Run as server-sent events, + terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default + value is None. + :paramtype stream_parameter: bool + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. The run will make a best effort to use only + the number of completion tokens specified, across multiple turns of the run. If the run + exceeds the number of completion tokens + specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more + info. Default value is None. + :paramtype max_completion_tokens: int + :keyword truncation_strategy: The strategy to use for dropping messages as the context window + moves forward. Default value is None. + :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject + :keyword tool_choice: Controls whether the model calls a tool and, if so, which tool. Is one of + the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], + AgentsNamedToolChoice. Default value is None. + :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode + or ~azure.ai.projects.models.AgentsNamedToolChoice + :keyword response_format: Specifies the format that the model must output. Is one of the + following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat. Default value is None. + :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: ThreadRun.
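A one-shot sketch of ``create_thread_and_run``: seed a new thread with a user message and start the run in a single call. IDs are placeholders, and passing ``thread`` as a plain dict leans on the models being MutableMapping-compatible, as the docstring notes.

run = agents_client.create_thread_and_run(
    assistant_id="asst_abc123",
    thread={"messages": [{"role": "user", "content": "Summarize the open issues."}]},
    max_completion_tokens=512,  # the run ends as ``incomplete`` if this budget is exceeded
)
print(run.thread_id, run.status)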
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) + + if body is _Unset: + if assistant_id is _Unset: + raise TypeError("missing required argument: assistant_id") + body = { + "assistant_id": assistant_id, + "instructions": instructions, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "metadata": metadata, + "model": model, + "response_format": response_format, + "stream": stream_parameter, + "temperature": temperature, + "thread": thread, + "tool_choice": tool_choice, + "tool_resources": tool_resources, + "tools": tools, + "top_p": top_p, + "truncation_strategy": truncation_strategy, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_thread_and_run_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ThreadRun, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: + """Gets a single run step from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :param step_id: Identifier of the run step. Required. + :type step_id: str + :return: RunStep. 
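Individual steps of a run can be fetched with ``get_run_step`` once their IDs are known; a sketch with placeholder IDs (``list_run_steps`` below is the usual way to discover step IDs).

step = agents_client.get_run_step(
    thread_id="thread_abc123",
    run_id="run_abc123",
    step_id="step_abc123",
)
print(step.type, step.status)  # e.g. a tool-call step versus a message-creation step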
The RunStep is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.RunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) + + _request = build_agents_get_run_step_request( + thread_id=thread_id, + run_id=run_id, + step_id=step_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.RunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_run_steps( + self, + thread_id: str, + run_id: str, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfRunStep: + """Gets a list of run steps from a thread run. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :param run_id: Identifier of the run. Required. + :type run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfRunStep + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) + + _request = build_agents_list_run_steps_request( + thread_id=thread_id, + run_id=run_id, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_files( + self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any + ) -> _models.FileListResponse: + """Gets a list of previously uploaded files. + + :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is + None. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :return: FileListResponse. 
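A sketch of ``list_files`` filtered by purpose; iterating via ``.data`` assumes the response model mirrors the OpenAI list shape.

files = agents_client.list_files(purpose="assistants")
for f in files.data:
    print(f.id, f.filename)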
The FileListResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) + + _request = build_agents_list_files_request( + purpose=purpose, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileListResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Required. + :type body: JSON + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def upload_file( + self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. 
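A sketch of the multipart ``upload_file`` overload documented above; the local path is a placeholder.

with open("./notes.md", "rb") as fh:
    uploaded = agents_client.upload_file(
        file=fh,
        purpose="assistants",  # required; known values are listed above
        filename="notes.md",
    )
print(uploaded.id)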
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def upload_file( + self, + body: JSON = _Unset, + *, + file: FileType = _Unset, + purpose: Union[str, _models.FilePurpose] = _Unset, + filename: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIFile: + """Uploads a file for use by other operations. + + :param body: Is one of the following types: JSON. Required. + :type body: JSON + :keyword file: The file data, in bytes. Required. + :paramtype file: ~azure.ai.projects._vendor.FileType + :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and + Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and + ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", + "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. + :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose + :keyword filename: The name of the file. Default value is None. + :paramtype filename: str + :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file is _Unset: + raise TypeError("missing required argument: file") + if purpose is _Unset: + raise TypeError("missing required argument: purpose") + body = {"file": file, "filename": filename, "purpose": purpose} + body = {k: v for k, v in body.items() if v is not None} + _body = body.as_dict() if isinstance(body, _model_base.Model) else body + _file_fields: List[str] = ["file"] + _data_fields: List[str] = ["purpose", "filename"] + _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) + + _request = build_agents_upload_file_request( + api_version=self._config.api_version, + files=_files, + data=_data, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: +
deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: + """Delete a previously uploaded file. + + :param file_id: The ID of the file to delete. Required. + :type file_id: str + :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: + """Returns information about a specific file. Does not retrieve file content. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: OpenAIFile. 
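A small cleanup sketch combining ``get_file`` and ``delete_file`` (placeholder ID); ``FileDeletionStatus`` reports whether the deletion took effect.

info = agents_client.get_file(file_id="file_abc123")
print(info.filename, info.purpose)

status = agents_client.delete_file(file_id="file_abc123")
assert status.deleted  # the deletion-status model flags success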
The OpenAIFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) + + _request = build_agents_get_file_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: + """Retrieves the raw content of a previously uploaded file. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: FileContentResponse.
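A sketch of reading uploaded bytes back with ``get_file_content`` (placeholder ID); the ``content`` attribute name is an assumption about the generated ``FileContentResponse`` model.

response = agents_client.get_file_content(file_id="file_abc123")
data = response.content  # raw file bytes, assuming the model's field name
with open("downloaded.bin", "wb") as fh:
    fh.write(data)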
The FileContentResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.FileContentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + + _request = build_agents_get_file_content_request( + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.FileContentResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_stores( + self, + *, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStore: + """Returns a list of vector stores. + + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStore. 
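The ``after``/``before`` cursors described above drive pagination; a sketch that walks every vector store with the ``after`` cursor. The ``has_more`` and ``last_id`` fields are assumed from the OpenAI pageable-list shape these models mirror.

after = None
while True:
    page = agents_client.list_vector_stores(limit=100, order="asc", after=after)
    for store in page.data:
        print(store.id, store.name)
    if not page.has_more:  # no further pages remain
        break
    after = page.last_id  # resume after the last object ID on this page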
The OpenAIPageableListOfVectorStore is compatible + with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_stores_request( + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store( + self, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. 
Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store( + self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store. + + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: VectorStore. 
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = { + "chunking_strategy": chunking_strategy, + "expires_after": expires_after, + "file_ids": file_ids, + "metadata": metadata, + "name": name, + } + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: + """Returns the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStore. 
The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ error_map: MutableMapping = {
+ 401: ClientAuthenticationError,
+ 404: ResourceNotFoundError,
+ 409: ResourceExistsError,
+ 304: ResourceNotModifiedError,
+ }
+ error_map.update(kwargs.pop("error_map", {}) or {})
+
+ _headers = kwargs.pop("headers", {}) or {}
+ _params = kwargs.pop("params", {}) or {}
+
+ cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
+
+ _request = build_agents_get_vector_store_request(
+ vector_store_id=vector_store_id,
+ api_version=self._config.api_version,
+ headers=_headers,
+ params=_params,
+ )
+ path_format_arguments = {
+ "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+ "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+ "resourceGroupName": self._serialize.url(
+ "self._config.resource_group_name", self._config.resource_group_name, "str"
+ ),
+ "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+ }
+ _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+ _stream = kwargs.pop("stream", False)
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ _request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ if _stream:
+ try:
+ response.read() # Load the body in memory and close the socket
+ except (StreamConsumedError, StreamClosedError):
+ pass
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response)
+
+ if _stream:
+ deserialized = response.iter_bytes()
+ else:
+ deserialized = _deserialize(_models.VectorStore, response.json())
+
+ if cls:
+ return cls(pipeline_response, deserialized, {}) # type: ignore
+
+ return deserialized # type: ignore
+
+ @overload
+ def modify_vector_store(
+ self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: JSON
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def modify_vector_store(
+ self,
+ vector_store_id: str,
+ *,
+ content_type: str = "application/json",
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def modify_vector_store(
+ self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Required.
+ :type body: IO[bytes]
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :return: VectorStore. The VectorStore is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.VectorStore
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def modify_vector_store(
+ self,
+ vector_store_id: str,
+ body: Union[JSON, IO[bytes]] = _Unset,
+ *,
+ name: Optional[str] = None,
+ expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs: Any
+ ) -> _models.VectorStore:
+ """Modifies an existing vector store.
+
+ :param vector_store_id: Identifier of the vector store. Required.
+ :type vector_store_id: str
+ :param body: Is either a JSON type or a IO[bytes] type. Required.
+ :type body: JSON or IO[bytes]
+ :keyword name: The name of the vector store. Default value is None.
+ :paramtype name: str
+ :keyword expires_after: Details on when this vector store expires. Default value is None.
+ :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: VectorStore.
The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) + + if body is _Unset: + body = {"expires_after": expires_after, "metadata": metadata, "name": name} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_modify_vector_store_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStore, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: + """Deletes the vector store object matching the specified ID. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :return: VectorStoreDeletionStatus. 
The VectorStoreDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_request( + vector_store_id=vector_store_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_files( + self, + vector_store_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.projects.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_files_request( + vector_store_id=vector_store_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, + vector_store_id: str, + *, + file_id: str, + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_id: str = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_id: Identifier of the file. Required. + :paramtype file_id: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + if body is _Unset: + if file_id is _Unset: + raise TypeError("missing required argument: file_id") + body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: + """Retrieves a vector store file. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete_vector_store_file( + self, vector_store_id: str, file_id: str, **kwargs: Any + ) -> _models.VectorStoreFileDeletionStatus: + """Delete a vector store file. This will remove the file from the vector store but the file itself + will not be deleted. + To delete the file, use the delete file endpoint. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param file_id: Identifier of the file. Required. + :type file_id: str + :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileDeletionStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) + + _request = build_agents_delete_vector_store_file_request( + vector_store_id=vector_store_id, + file_id=file_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, + vector_store_id: str, + *, + file_ids: List[str], + content_type: str = "application/json", + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". 
+ :paramtype content_type: str + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_file_batch( + self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_vector_store_file_batch( + self, + vector_store_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + file_ids: List[str] = _Unset, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Create a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword file_ids: List of file identifiers. Required. + :paramtype file_ids: list[str] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + if body is _Unset: + if file_ids is _Unset: + raise TypeError("missing required argument: file_ids") + body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_agents_create_vector_store_file_batch_request( + vector_store_id=vector_store_id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Retrieve a vector store file batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_get_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def cancel_vector_store_file_batch( + self, vector_store_id: str, batch_id: str, **kwargs: Any + ) -> _models.VectorStoreFileBatch: + """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch + as soon as possible. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFileBatch + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) + + _request = build_agents_cancel_vector_store_file_batch_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_vector_store_file_batch_files( + self, + vector_store_id: str, + batch_id: str, + *, + filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any + ) -> _models.OpenAIPageableListOfVectorStoreFile: + """Returns a list of vector store files in a batch. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param batch_id: Identifier of the file batch. Required. + :type batch_id: str + :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", + and "cancelled". Default value is None. + :paramtype filter: str or ~azure.ai.projects.models.VectorStoreFileStatusFilter + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. 
For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is + compatible with MutableMapping + :rtype: ~azure.ai.projects.models.OpenAIPageableListOfVectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) + + _request = build_agents_list_vector_store_file_batch_files_request( + vector_store_id=vector_store_id, + batch_id=batch_id, + filter=filter, + limit=limit, + order=order, + after=after, + before=before, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class ConnectionsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`connections` attribute. 
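+
+ Illustrative access pattern (a hand-written sketch, not generated code; it
+ assumes a configured :class:`~azure.ai.projects.AIProjectClient` named
+ ``project_client``):
+
+ .. code-block:: python
+
+ # The operation group is obtained from the client attribute; the
+ # underscored methods on this class are internal.
+ connections = project_client.connections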
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def _list( + self, + *, + category: Optional[Union[str, _models.ConnectionType]] = None, + include_all: Optional[bool] = None, + target: Optional[str] = None, + **kwargs: Any + ) -> _models._models.ConnectionsListResponse: + """List the details of all the connections (not including their credentials). + + :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". Default value is None. + :paramtype category: str or ~azure.ai.projects.models.ConnectionType + :keyword include_all: Indicates whether to list datastores. Service default: do not list + datastores. Default value is None. + :paramtype include_all: bool + :keyword target: Target of the workspace connection. Default value is None. + :paramtype target: str + :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.ConnectionsListResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + + _request = build_connections_list_request( + category=category, + include_all=include_all, + target=target, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _get(self, connection_name: str, **kwargs: Any) -> _models._models.ConnectionsListSecretsResponse: + 
"""Get the details of a single connection, without credentials. + + :param connection_name: Connection Name. Required. + :type connection_name: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_request( + connection_name=connection_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def _list_secrets( + self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + def _list_secrets( + self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + @overload + def _list_secrets( + self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: ... + + @distributed_trace + def _list_secrets( + self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any + ) -> _models._models.ConnectionsListSecretsResponse: + """Get the details of a single connection, including credentials (if available). + + :param connection_name: Connection Name. Required. + :type connection_name: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. 
+ :paramtype ignored: str + :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with + MutableMapping + :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + + if body is _Unset: + if ignored is _Unset: + raise TypeError("missing required argument: ignored") + body = {"ignored": ignored} + body = {k: v for k, v in body.items() if v is not None} + content_type = content_type or "application/json" + _content = None + if isinstance(body, (IOBase, bytes)): + _content = body + else: + _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_connections_list_secrets_request( + connection_name=connection_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class EvaluationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`evaluations` attribute. 
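+
+    A minimal usage sketch (``project_client`` here is an assumed, already-constructed
+    :class:`~azure.ai.projects.AIProjectClient` instance):
+
+    .. code-block:: python
+
+        evaluation = project_client.evaluations.get(id="<evaluation-id>")
+        for schedule in project_client.evaluations.list_schedule():
+            print(schedule)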
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get(self, id: str, **kwargs: Any) -> _models.Evaluation: + """Resource read operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + _request = build_evaluations_get_request( + id=id, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: ~azure.ai.projects.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: + """Run the evaluation. 
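+
+        A sketch of calling this overload with a plain mapping (the key shown is a
+        placeholder, not the actual ``Evaluation`` schema):
+
+        .. code-block:: python
+
+            evaluation = project_client.evaluations.create(
+                evaluation={"<field>": "<value>"},
+            )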
+ + :param evaluation: Evaluation to run. Required. + :type evaluation: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Required. + :type evaluation: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: + """Run the evaluation. + + :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type evaluation: ~azure.ai.projects.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(evaluation, (IOBase, bytes)): + _content = evaluation + else: + _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + 
deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.Evaluation"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of Evaluation + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.Evaluation] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.get("nextLink") or None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @overload + def update( + self, + id: str, + resource: _models.Evaluation, + *, + content_type: str = "application/merge-patch+json", + **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.projects.models.Evaluation + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update( + self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/merge-patch+json". + :paramtype content_type: str + :return: Evaluation. The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update( + self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any + ) -> _models.Evaluation: + """Resource update operation template. + + :param id: Identifier of the evaluation. Required. + :type id: str + :param resource: The resource instance. Is one of the following types: Evaluation, JSON, + IO[bytes] Required. + :type resource: ~azure.ai.projects.models.Evaluation or JSON or IO[bytes] + :return: Evaluation. 
The Evaluation is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Evaluation + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) + + content_type = content_type or "application/merge-patch+json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_update_request( + id=id, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.Evaluation, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: + """Resource read operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + _request = build_evaluations_get_schedule_request( + name=name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @overload + def create_or_replace_schedule( + self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: ~azure.ai.projects.models.EvaluationSchedule + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_replace_schedule( + self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_replace_schedule( + self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Required. + :type resource: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_replace_schedule( + self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any + ) -> _models.EvaluationSchedule: + """Create or replace operation template. + + :param name: Name of the schedule, which also serves as the unique identifier for the + evaluation. Required. + :type name: str + :param resource: The resource instance. Is one of the following types: EvaluationSchedule, + JSON, IO[bytes] Required. + :type resource: ~azure.ai.projects.models.EvaluationSchedule or JSON or IO[bytes] + :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.EvaluationSchedule + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(resource, (IOBase, bytes)): + _content = resource + else: + _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_evaluations_create_or_replace_schedule_request( + name=name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + 
response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.EvaluationSchedule, response.json()) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list_schedule( + self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any + ) -> Iterable["_models.EvaluationSchedule"]: + """Resource list operation template. + + :keyword top: The number of result items to return. Default value is None. + :paramtype top: int + :keyword skip: The number of result items to skip. Default value is None. + :paramtype skip: int + :return: An iterator like instance of EvaluationSchedule + :rtype: ~azure.core.paging.ItemPaged[~azure.ai.projects.models.EvaluationSchedule] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + maxpagesize = kwargs.pop("maxpagesize", None) + cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_evaluations_list_schedule_request( + top=top, + skip=skip, + maxpagesize=maxpagesize, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url( + "self._config.subscription_id", self._config.subscription_id, "str" + ), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", 
self._config.project_name, "str"),
+                }
+                _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+            return _request
+
+        def extract_data(pipeline_response):
+            deserialized = pipeline_response.http_response.json()
+            list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"])
+            if cls:
+                list_of_elem = cls(list_of_elem)  # type: ignore
+            return deserialized.get("nextLink") or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            _request = prepare_request(next_link)
+
+            _stream = False
+            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+                _request, stream=_stream, **kwargs
+            )
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response)
+
+            return pipeline_response
+
+        return ItemPaged(get_next, extract_data)
+
+    @distributed_trace
+    def delete_schedule(self, name: str, **kwargs: Any) -> None:  # pylint: disable=inconsistent-return-statements
+        """Resource delete operation template.
+
+        :param name: Name of the schedule, which also serves as the unique identifier for the
+         evaluation. Required.
+        :type name: str
+        :return: None
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[None] = kwargs.pop("cls", None)
+
+        _request = build_evaluations_delete_schedule_request(
+            name=name,
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
+            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
+            "resourceGroupName": self._serialize.url(
+                "self._config.resource_group_name", self._config.resource_group_name, "str"
+            ),
+            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = False
+        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)  # type: ignore
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
new file mode 100644
index 000000000000..e166ab98f6c9
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
@@ -0,0 +1,1981 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""Customize generated code here.
+
+Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
+"""
+import sys, io, logging, os, time
+from io import IOBase
+from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast
+
+# from zoneinfo import ZoneInfo
+from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
+from ._operations import AgentsOperations as AgentsOperationsGenerated
+from ..models._enums import AuthenticationType, ConnectionType
+from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse
+from .._types import AgentsApiResponseFormatOption
+from ..models._patch import ConnectionProperties
+from ..models._enums import FilePurpose
+from .._vendor import FileType
+from .. import models as _models
+
+from azure.core.tracing.decorator import distributed_trace
+
+if sys.version_info >= (3, 9):
+    from collections.abc import MutableMapping
+else:
+    from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from .. import _types
+
+JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
+_Unset: Any = object()
+
+logger = logging.getLogger(__name__)
+
+
+class InferenceOperations:
+
+    def __init__(self, outer_instance):
+        self.outer_instance = outer_instance
+
+    @distributed_trace
+    def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
+        """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Chat Completions AI model deployment.
+        The package `azure-ai-inference` must be installed prior to calling this method.
+
+        :return: An authenticated chat completions client
+        :rtype: ~azure.ai.inference.ChatCompletionsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference import ChatCompletionsClient
+        except ModuleNotFoundError as _:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            )
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
+            )
+            client = ChatCompletionsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
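+            # Assumes connection.token_credential was populated by ConnectionsOperations.get
+            # (a SASTokenCredential from models._patch) when with_credentials=True was requested.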
+            logger.debug(
+                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
+            )
+            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+    @distributed_trace
+    def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
+        """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default
+        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
+        The package `azure-ai-inference` must be installed prior to calling this method.
+
+        :return: An authenticated embeddings client
+        :rtype: ~azure.ai.inference.EmbeddingsClient
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No serverless connection found")
+
+        try:
+            from azure.ai.inference import EmbeddingsClient
+        except ModuleNotFoundError as _:
+            raise ModuleNotFoundError(
+                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
+            )
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
+            )
+            from azure.core.credentials import AzureKeyCredential
+
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            # MaaS models do not yet support EntraID auth
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
+            )
+            client = EmbeddingsClient(
+                endpoint=connection.endpoint_url, credential=connection.token_credential
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            # TODO - Not yet supported by the service. Expected 9/27.
+            logger.debug(
+                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
+            )
+            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+    @distributed_trace
+    def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI":
+        """Get an authenticated AzureOpenAI client (from the `openai` package) for the default
+        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
+
+        :return: An authenticated AzureOpenAI client
+        :rtype: ~openai.AzureOpenAI
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connection = self.outer_instance.connections.get_default(
+            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
+        )
+        if not connection:
+            raise ValueError("No Azure OpenAI connection found")
+
+        try:
+            from openai import AzureOpenAI
+        except ModuleNotFoundError as _:
+            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'")
+
+        # Pick latest GA version from the "Data plane - Inference" row in the table
+        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        AZURE_OPENAI_API_VERSION = "2024-06-01"
+
+        if connection.authentication_type == AuthenticationType.API_KEY:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
+            )
+            client = AzureOpenAI(
+                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
+            )
+        elif connection.authentication_type == AuthenticationType.AAD:
+            logger.debug(
+                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
+            )
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as _:
+                raise ModuleNotFoundError(
+                    "azure.identity package not installed. Please install it using 'pip install azure-identity'"
+                )
+            client = AzureOpenAI(
+                # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        elif connection.authentication_type == AuthenticationType.SAS:
+            logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
+            # Import here as well: the import in the AAD branch above is not in scope when this
+            # branch runs, and referencing get_bearer_token_provider without it raises NameError.
+            try:
+                from azure.identity import get_bearer_token_provider
+            except ModuleNotFoundError as _:
+                raise ModuleNotFoundError(
+                    "azure.identity package not installed. Please install it using 'pip install azure-identity'"
+                )
+            client = AzureOpenAI(
+                azure_ad_token_provider=get_bearer_token_provider(
+                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                ),
+                azure_endpoint=connection.endpoint_url,
+                api_version=AZURE_OPENAI_API_VERSION,
+            )
+        else:
+            raise ValueError("Unknown authentication type")
+
+        return client
+
+
+class ConnectionsOperations(ConnectionsOperationsGenerated):
+
+    @distributed_trace
+    def get_default(
+        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
+    ) -> Optional[ConnectionProperties]:
+        """Get the properties of the default connection of a certain connection type, with or without
+        populating authentication credentials.
+
+        :keyword connection_type: The connection type. Required.
+        :paramtype connection_type: ~azure.ai.projects.models.ConnectionType
+        :keyword with_credentials: Whether to populate the connection properties with authentication
+         credentials. Optional.
+        :paramtype with_credentials: bool
+        :return: The connection properties, or None if no connection of the given type exists.
+        :rtype: ~azure.ai.projects.models.ConnectionProperties or None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_type:
+            raise ValueError("You must specify a connection type")
+        # Since there is no notion of default connection at the moment, list all connections in the category
+        # and return the first one
+        connection_properties_list = self.list(connection_type=connection_type, **kwargs)
+        if len(connection_properties_list) > 0:
+            if with_credentials:
+                return self.get(
+                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
+                )
+            else:
+                return connection_properties_list[0]
+        else:
+            return None
+
+    @distributed_trace
+    def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
+        """Get the properties of a single connection, given its connection name, with or without
+        populating authentication credentials.
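+
+        A sketch of typical usage (``project_client`` here is an assumed, already-constructed
+        :class:`~azure.ai.projects.AIProjectClient` instance):
+
+        .. code-block:: python
+
+            connection = project_client.connections.get(
+                connection_name="<connection-name>", with_credentials=True
+            )
+            print(connection.endpoint_url)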
+
+        :keyword connection_name: Connection Name. Required.
+        :paramtype connection_name: str
+        :keyword with_credentials: Whether to populate the connection properties with authentication
+         credentials. Optional.
+        :paramtype with_credentials: bool
+        :return: The connection properties
+        :rtype: ~azure.ai.projects.models.ConnectionProperties
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        if not connection_name:
+            raise ValueError("Connection name cannot be empty")
+        if with_credentials:
+            connection: ConnectionsListSecretsResponse = self._list_secrets(
+                connection_name=connection_name, ignored="ignore", **kwargs
+            )
+            if connection.properties.auth_type == AuthenticationType.AAD:
+                return ConnectionProperties(connection=connection, token_credential=self._config.credential)
+            elif connection.properties.auth_type == AuthenticationType.SAS:
+                from ..models._patch import SASTokenCredential
+
+                token_credential = SASTokenCredential(
+                    sas_token=connection.properties.credentials.sas,
+                    credential=self._config.credential,
+                    subscription_id=self._config.subscription_id,
+                    resource_group_name=self._config.resource_group_name,
+                    project_name=self._config.project_name,
+                    connection_name=connection_name,
+                )
+                return ConnectionProperties(connection=connection, token_credential=token_credential)
+
+            return ConnectionProperties(connection=connection)
+        else:
+            return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs))
+
+    @distributed_trace
+    def list(
+        self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any
+    ) -> Iterable[ConnectionProperties]:
+        """List the properties of all connections, or all connections of a certain connection type.
+
+        :keyword connection_type: The connection type. Optional. If provided, this method lists connections
+         of this type. If not provided, all connections are listed.
+        :paramtype connection_type: ~azure.ai.projects.models.ConnectionType
+        :return: A list of connection properties
+        :rtype: Iterable[~azure.ai.projects.models.ConnectionProperties]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        kwargs.setdefault("merge_span", True)
+        connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs)
+
+        # Iterate to create the simplified result property
+        connection_properties_list: List[ConnectionProperties] = []
+        for connection in connections_list.value:
+            connection_properties_list.append(ConnectionProperties(connection=connection))
+
+        return connection_properties_list
+
+
+class AgentsOperations(AgentsOperationsGenerated):
+    @overload
+    def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
+        """Creates a new agent.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Agent.
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword tools: The collection of tools to enable for the new agent. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, the ``code_interpreter`` + tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector + store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent( + self, + *, + model: str, + content_type: str = "application/json", + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.Agent: + """Creates a new agent. + + :keyword model: The ID of the model to use. Required. + :paramtype model: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword name: The name of the new agent. Default value is None. + :paramtype name: str + :keyword description: The description of the new agent. Default value is None. + :paramtype description: str + :keyword instructions: The system instructions for the new agent to use. Default value is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: + """Creates a new agent. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_agent( + self, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + content_type: str = "application/json", + **kwargs: Any, + ) -> _models.Agent: + """ + Creates a new agent with various configurations, delegating to the generated operations. + + :param body: JSON or IO[bytes]. Required if `model` is not provided. + :param model: The ID of the model to use. Required if `body` is not provided. + :param name: The name of the new agent. + :param description: A description for the new agent. + :param instructions: System instructions for the agent. + :param tools: List of tools definitions for the agent. + :param tool_resources: Resources used by the agent's tools. + :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). + :param temperature: Sampling temperature for generating agent responses. + :param top_p: Nucleus sampling parameter. + :param response_format: Response format for tool calls. + :param metadata: Key/value pairs for storing additional information. + :param content_type: Content type of the body. + :param kwargs: Additional parameters. + :return: An Agent object. + :raises: HttpResponseError for HTTP errors. + """ + if body is not _Unset: + if isinstance(body, IOBase): + return super().create_agent(body=body, content_type=content_type, **kwargs) + return super().create_agent(body=body, **kwargs) + + if toolset is not None: + self._toolset = toolset + tools = toolset.definitions + tool_resources = toolset.resources + + return super().create_agent( + model=model, + name=name, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + temperature=temperature, + top_p=top_p, + response_format=response_format, + metadata=metadata, + **kwargs, + ) + + def get_toolset(self) -> Optional[_models.ToolSet]: + """ + Get the toolset for the agent. + + :return: The toolset for the agent. If not set, returns None. + :rtype: ~azure.ai.projects.models.ToolSet + """ + if hasattr(self, "_toolset"): + return self._toolset + return None + + @overload + def create_run( + self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadRun + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_run( + self, + thread_id: str, + *, + assistant_id: str, + content_type: str = "application/json", + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[_models.ThreadMessage]] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + truncation_strategy: Optional[_models.TruncationObject] = None, + tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any, + ) -> _models.ThreadRun: + """Creates a new run for an agent thread. + + :param thread_id: Required. + :type thread_id: str + :keyword assistant_id: The ID of the agent that should run the thread. Required. + :paramtype assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The overridden model name that the agent should use to run the thread. Default + value is None. + :paramtype model: str + :keyword instructions: The overridden system instructions that the agent should use to run the + thread. Default value is None. + :paramtype instructions: str + :keyword additional_instructions: Additional instructions to append at the end of the + instructions for the run. This is useful for modifying the behavior + on a per-run basis without overriding other instructions. Default value is None. + :paramtype additional_instructions: str + :keyword additional_messages: Adds additional messages to the thread before creating the run. + Default value is None. + :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage] + :keyword tools: The overridden list of enabled tools that the agent should use to run the + thread. Default value is None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output + more random, while lower values like 0.2 will make it more focused and deterministic. Default + value is None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model + considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens + comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the + course of the run. The run will make a best effort to use only + the number of prompt tokens specified, across multiple turns of the run. If the run exceeds + the number of prompt tokens specified, + the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default + value is None. + :paramtype max_prompt_tokens: int + :keyword max_completion_tokens: The maximum number of completion tokens that may be used over + the course of the run. 
The run will make a best effort
+         to use only the number of completion tokens specified, across multiple turns of the run. If
+         the run exceeds the number of completion tokens specified, the run will end with status
+         ``incomplete``. See ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of the
+         following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_run(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_run(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior on a per-run basis without
+         overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random, while lower values like 0.2 will make it more focused and
+         deterministic. Default value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only the number of prompt tokens
+         specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only the number of completion
+         tokens specified, across multiple turns of the run. If the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of the
+         following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if isinstance(body, dict):  # Handle overload with JSON body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
+            response = super().create_run(
+                thread_id,
+                assistant_id=assistant_id,
+                model=model,
+                instructions=instructions,
+                additional_instructions=additional_instructions,
+                additional_messages=additional_messages,
+                tools=tools,
+                stream_parameter=False,
+                stream=False,
+                temperature=temperature,
+                top_p=top_p,
+                max_prompt_tokens=max_prompt_tokens,
+                max_completion_tokens=max_completion_tokens,
+                truncation_strategy=truncation_strategy,
+                tool_choice=tool_choice,
+                response_format=response_format,
+                metadata=metadata,
+                **kwargs,
+            )
+
+        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # This method never streams; streaming runs are created via create_stream instead.
+        return response
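+
+    # Illustrative usage sketch (hedged, not part of the API surface): assuming a
+    # `project_client` created as in this package's samples, plus existing `thread`
+    # and `agent` objects, a run is typically created with keyword arguments:
+    #
+    #     run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+    #
+    # or, per the JSON-body overload, from a pre-built body:
+    #
+    #     run = project_client.agents.create_run(thread.id, {"assistant_id": agent.id})
+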
+    @distributed_trace
+    def create_and_process_run(
+        self,
+        thread_id: str,
+        assistant_id: str,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: int = 1,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Creates a new run for an agent thread and processes the run.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread.
+         Default value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run
+         the thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior on a per-run basis without
+         overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random, while lower values like 0.2 will make it more focused and
+         deterministic. Default value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only the number of prompt tokens
+         specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only the number of completion
+         tokens specified, across multiple turns of the run. If the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of the
+         following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode or
+         ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
+         Default value is 1.
+        :paramtype sleep_interval: int
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        # Create and initiate the run with additional parameters
+        run = self.create_run(
+            thread_id=thread_id,
+            assistant_id=assistant_id,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            tools=tools,
+            temperature=temperature,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            truncation_strategy=truncation_strategy,
+            tool_choice=tool_choice,
+            response_format=response_format,
+            metadata=metadata,
+            **kwargs,
+        )
+
+        # Monitor and process the run status
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            time.sleep(sleep_interval)
+            run = self.get_run(thread_id=thread_id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    logger.warning("No tool calls provided - cancelling run")
+                    self.cancel_run(thread_id=thread_id, run_id=run.id)
+                    break
+
+                toolset = self.get_toolset()
+                if toolset:
+                    tool_outputs = toolset.execute_tool_calls(tool_calls)
+                else:
+                    raise ValueError("Toolset is not available in the client.")
+
+                logger.info("Tool outputs: %s", tool_outputs)
+                if tool_outputs:
+                    self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
+
+            logger.info("Current run status: %s", run.status)
+
+        return run
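+
+    # Illustrative sketch (hedged): assuming the agent was created with a ToolSet,
+    # create_and_process_run polls until a terminal state and auto-executes any
+    # required tool calls via the stored toolset:
+    #
+    #     run = project_client.agents.create_and_process_run(
+    #         thread_id=thread.id, assistant_id=agent.id, sleep_interval=2
+    #     )
+    #     print(run.status)  # e.g. "completed" or "failed"
+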
+
+    @overload
+    def create_stream(
+        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Creates a new stream for an agent thread, terminating when the Run enters a terminal state
+        with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self,
+        thread_id: str,
+        *,
+        assistant_id: str,
+        content_type: str = "application/json",
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Creates a new stream for an agent thread.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior on a per-run basis without
+         overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random, while lower values like 0.2 will make it more focused and
+         deterministic. Default value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only the number of prompt tokens
+         specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only the number of completion
+         tokens specified, across multiple turns of the run. If the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of the
+         following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_stream(
+        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Creates a new stream for an agent thread, terminating when the Run enters a terminal state
+        with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_stream(
+        self,
+        thread_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        assistant_id: str = _Unset,
+        model: Optional[str] = None,
+        instructions: Optional[str] = None,
+        additional_instructions: Optional[str] = None,
+        additional_messages: Optional[List[_models.ThreadMessage]] = None,
+        tools: Optional[List[_models.ToolDefinition]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        max_prompt_tokens: Optional[int] = None,
+        max_completion_tokens: Optional[int] = None,
+        truncation_strategy: Optional[_models.TruncationObject] = None,
+        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
+        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Creates a new stream for an agent thread, terminating when the Run enters a terminal state
+        with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword assistant_id: The ID of the agent that should run the thread. Required.
+        :paramtype assistant_id: str
+        :keyword model: The overridden model name that the agent should use to run the thread. Default
+         value is None.
+        :paramtype model: str
+        :keyword instructions: The overridden system instructions that the agent should use to run the
+         thread. Default value is None.
+        :paramtype instructions: str
+        :keyword additional_instructions: Additional instructions to append at the end of the
+         instructions for the run. This is useful for modifying the behavior on a per-run basis without
+         overriding other instructions. Default value is None.
+        :paramtype additional_instructions: str
+        :keyword additional_messages: Adds additional messages to the thread before creating the run.
+         Default value is None.
+        :paramtype additional_messages: list[~azure.ai.projects.models.ThreadMessage]
+        :keyword tools: The overridden list of enabled tools that the agent should use to run the
+         thread. Default value is None.
+        :paramtype tools: list[~azure.ai.projects.models.ToolDefinition]
+        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
+         will make the output more random, while lower values like 0.2 will make it more focused and
+         deterministic. Default value is None.
+        :paramtype temperature: float
+        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+         model considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         tokens comprising the top 10% probability mass are considered.
+
+         We generally recommend altering this or temperature but not both. Default value is None.
+        :paramtype top_p: float
+        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
+         course of the run. The run will make a best effort to use only the number of prompt tokens
+         specified, across multiple turns of the run. If the run exceeds the number of prompt tokens
+         specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
+         info. Default value is None.
+        :paramtype max_prompt_tokens: int
+        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
+         the course of the run. The run will make a best effort to use only the number of completion
+         tokens specified, across multiple turns of the run. If the run exceeds the number of
+         completion tokens specified, the run will end with status ``incomplete``. See
+         ``incomplete_details`` for more info. Default value is None.
+        :paramtype max_completion_tokens: int
+        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
+         moves forward. Default value is None.
+        :paramtype truncation_strategy: ~azure.ai.projects.models.TruncationObject
+        :keyword tool_choice: Controls which tool, if any, is called by the model. Is one of the
+         following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
+         AgentsNamedToolChoice. Default value is None.
+        :paramtype tool_choice: str or ~azure.ai.projects.models.AgentsApiToolChoiceOptionMode or
+         ~azure.ai.projects.models.AgentsNamedToolChoice
+        :keyword response_format: Specifies the format that the model must output. Is one of the
+         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
+         AgentsApiResponseFormat. Default value is None.
+        :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if isinstance(body, dict):  # Handle overload with JSON body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        elif assistant_id is not _Unset:  # Handle overload with keyword arguments.
+            response = super().create_run(
+                thread_id,
+                assistant_id=assistant_id,
+                model=model,
+                instructions=instructions,
+                additional_instructions=additional_instructions,
+                additional_messages=additional_messages,
+                tools=tools,
+                stream_parameter=True,
+                stream=True,
+                temperature=temperature,
+                top_p=top_p,
+                max_prompt_tokens=max_prompt_tokens,
+                max_completion_tokens=max_completion_tokens,
+                truncation_strategy=truncation_strategy,
+                tool_choice=tool_choice,
+                response_format=response_format,
+                metadata=metadata,
+                **kwargs,
+            )
+
+        elif isinstance(body, io.IOBase):  # Handle overload with binary body.
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # Cast the response to Iterator[bytes] for type correctness
+        response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
+
+        return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
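+
+    # Illustrative sketch (hedged): the returned AgentRunStream is consumed as a
+    # context manager; `MyEventHandler` is a placeholder AgentEventHandler subclass:
+    #
+    #     with project_client.agents.create_stream(
+    #         thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+    #     ) as stream:
+    #         stream.until_done()
+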
+
+    @overload
+    def submit_tool_outputs_to_run(
+        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def submit_tool_outputs_to_run(
+        self,
+        thread_id: str,
+        run_id: str,
+        *,
+        tool_outputs: List[_models.ToolOutput],
+        content_type: str = "application/json",
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def submit_tool_outputs_to_run(
+        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.ThreadRun:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def submit_tool_outputs_to_run(
+        self,
+        thread_id: str,
+        run_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        tool_outputs: List[_models.ToolOutput] = _Unset,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.ThreadRun:
+        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.ThreadRun
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if isinstance(body, dict):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        elif tool_outputs is not _Unset:
+            response = super().submit_tool_outputs_to_run(
+                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
+            )
+
+        elif isinstance(body, io.IOBase):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # This method never streams; streaming submission goes through submit_tool_outputs_to_stream.
+        return response
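+
+    # Illustrative sketch (hedged): when polling shows a run in the "requires_action"
+    # state, tool outputs are computed via a toolset and submitted back:
+    #
+    #     if run.status == "requires_action" and isinstance(
+    #         run.required_action, _models.SubmitToolOutputsAction
+    #     ):
+    #         outputs = toolset.execute_tool_calls(
+    #             run.required_action.submit_tool_outputs.tool_calls
+    #         )
+    #         run = project_client.agents.submit_tool_outputs_to_run(
+    #             thread_id=thread.id, run_id=run.id, tool_outputs=outputs
+    #         )
+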
+
+    @overload
+    def submit_tool_outputs_to_stream(
+        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs', terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def submit_tool_outputs_to_stream(
+        self,
+        thread_id: str,
+        run_id: str,
+        *,
+        tool_outputs: List[_models.ToolOutput],
+        content_type: str = "application/json",
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs', terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def submit_tool_outputs_to_stream(
+        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.AgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs'.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def submit_tool_outputs_to_stream(
+        self,
+        thread_id: str,
+        run_id: str,
+        body: Union[JSON, IO[bytes]] = _Unset,
+        *,
+        tool_outputs: List[_models.ToolOutput] = _Unset,
+        event_handler: Optional[_models.AgentEventHandler] = None,
+        **kwargs: Any,
+    ) -> _models.AgentRunStream:
+        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
+        outputs will have a status of 'requires_action' with a required_action.type of
+        'submit_tool_outputs', terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
+
+        :param thread_id: Required.
+        :type thread_id: str
+        :param run_id: Required.
+        :type run_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Required.
+        :type body: JSON or IO[bytes]
+        :keyword tool_outputs: Required.
+        :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput]
+        :keyword event_handler: The event handler to use for processing events during the run. Default
+         value is None.
+        :paramtype event_handler: ~azure.ai.projects.models.AgentEventHandler
+        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
+        :rtype: ~azure.ai.projects.models.AgentRunStream
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if isinstance(body, dict):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        elif tool_outputs is not _Unset:
+            response = super().submit_tool_outputs_to_run(
+                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
+            )
+
+        elif isinstance(body, io.IOBase):
+            content_type = kwargs.get("content_type", "application/json")
+            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
+
+        else:
+            raise ValueError("Invalid combination of arguments provided.")
+
+        # Cast the response to Iterator[bytes] for type correctness
+        response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
+
+        return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
+
+    def _handle_submit_tool_outputs(
+        self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None
+    ) -> None:
+        if isinstance(run.required_action, _models.SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+            if not tool_calls:
+                logger.debug("No tool calls to execute.")
+                return
+
+            toolset = self.get_toolset()
+            if toolset:
+                tool_outputs = toolset.execute_tool_calls(tool_calls)
+            else:
+                logger.warning("Toolset is not available in the client.")
+                return
+
+            # Use lazy %-style formatting so the message is only built when emitted.
+            logger.info("Tool outputs: %s", tool_outputs)
+            if tool_outputs:
+                with self.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
+                ) as stream:
+                    stream.until_done()
+
+    @overload
+    def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param body: Required.
+        :type body: JSON
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file(
+        self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :keyword file: Required.
+        :paramtype file: ~azure.ai.projects._vendor.FileType
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+        :keyword filename: Default value is None.
+        :paramtype filename: str
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file(
+        self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param file_path: Required.
+        :type file_path: str
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def upload_file(
+        self,
+        body: Optional[JSON] = None,
+        *,
+        file: Optional[FileType] = None,
+        file_path: Optional[str] = None,
+        purpose: Union[str, _models.FilePurpose, None] = None,
+        filename: Optional[str] = None,
+        **kwargs: Any,
+    ) -> _models.OpenAIFile:
+        """
+        Uploads a file for use by other operations, delegating to the generated operations.
+
+        :param body: JSON. Required if neither `file` nor `file_path` is provided.
+        :param file: File content. Required if `body` and `file_path` are not provided; must be
+         combined with `purpose`.
+        :param file_path: Path to the file. Required if `body` and `file` are not provided; must be
+         combined with `purpose`.
+        :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required unless `body` is provided.
+        :param filename: The name of the file.
+        :param kwargs: Additional parameters.
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :raises FileNotFoundError: If the file_path is invalid.
+        :raises IOError: If there are issues with reading the file.
+        :raises: HttpResponseError for HTTP errors.
+        """
+        if body is not None:
+            return super().upload_file(body=body, **kwargs)
+
+        if isinstance(purpose, FilePurpose):
+            purpose = purpose.value
+
+        if file is not None and purpose is not None:
+            return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+
+        if file_path is not None and purpose is not None:
+            if not os.path.isfile(file_path):
+                raise FileNotFoundError(f"The file path provided does not exist: {file_path}")
+
+            try:
+                with open(file_path, "rb") as f:
+                    content = f.read()
+
+                # Determine filename and create correct FileType
+                base_filename = filename or os.path.basename(file_path)
+                file_content: FileType = (base_filename, content)
+
+                return super().upload_file(file=file_content, purpose=purpose, **kwargs)
+            except IOError as e:
+                raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") from e
+
+        raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")
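+
+    # Illustrative sketch (hedged): uploading a local file by path for agent use;
+    # the path is a placeholder, and "assistants" is one of the documented known
+    # values for `purpose`:
+    #
+    #     uploaded = project_client.agents.upload_file(
+    #         file_path="./data/notes.txt", purpose="assistants"
+    #     )
+    #     print(uploaded.id)
+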
+
+    @overload
+    def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param body: Required.
+        :type body: JSON
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file_and_poll(
+        self,
+        *,
+        file: FileType,
+        purpose: Union[str, _models.FilePurpose],
+        filename: Optional[str] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :keyword file: Required.
+        :paramtype file: ~azure.ai.projects._vendor.FileType
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+        :keyword filename: Default value is None.
+        :paramtype filename: str
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def upload_file_and_poll(
+        self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any
+    ) -> _models.OpenAIFile:
+        """Uploads a file for use by other operations.
+
+        :param file_path: Required.
+        :type file_path: str
+        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required.
+        :paramtype purpose: str or ~azure.ai.projects.models.FilePurpose
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.OpenAIFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def upload_file_and_poll(
+        self,
+        body: Optional[JSON] = None,
+        *,
+        file: Optional[FileType] = None,
+        file_path: Optional[str] = None,
+        purpose: Union[str, _models.FilePurpose, None] = None,
+        filename: Optional[str] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.OpenAIFile:
+        """
+        Uploads a file for use by other operations, delegating to the generated operations.
+
+        :param body: JSON. Required if neither `file` nor `file_path` is provided.
+        :param file: File content. Required if `body` and `file_path` are not provided; must be
+         combined with `purpose`.
+        :param file_path: Path to the file. Required if `body` and `file` are not provided; must be
+         combined with `purpose`.
+        :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
+         "assistants_output", "batch", "batch_output", and "vision". Required unless `body` is provided.
+        :param filename: The name of the file.
+        :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :param kwargs: Additional parameters.
+        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
+        :raises FileNotFoundError: If the file_path is invalid.
+        :raises IOError: If there are issues with reading the file.
+        :raises: HttpResponseError for HTTP errors.
+        """
+        if body is not None:
+            uploaded_file = self.upload_file(body=body, **kwargs)
+        elif file is not None and purpose is not None:
+            uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
+        elif file_path is not None and purpose is not None:
+            uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs)
+        else:
+            raise ValueError(
+                "Invalid parameters for upload_file_and_poll. Please provide either 'body', "
+                "or both 'file' and 'purpose', or both 'file_path' and 'purpose'."
+ ) + + while uploaded_file.status in ["uploaded", "pending", "running"]: + time.sleep(sleep_interval) + uploaded_file = self.get_file(uploaded_file.id) + + return uploaded_file + + @overload + def create_vector_store_and_poll( + self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, + *, + content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + name: Optional[str] = None, + expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + metadata: Optional[Dict[str, str]] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like + ``file_search`` that can access files. Default value is None. + :paramtype file_ids: list[str] + :keyword name: The name of the vector store. Default value is None. + :paramtype name: str + :keyword expires_after: Details on when this vector store expires. Default value is None. + :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStore. The VectorStore is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStore + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_vector_store_and_poll( + self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any + ) -> _models.VectorStore: + """Creates a vector store and poll. + + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. 
+        :paramtype sleep_interval: float
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store_and_poll(
+        self,
+        body: Union[JSON, IO[bytes], None] = None,
+        *,
+        content_type: str = "application/json",
+        file_ids: Optional[List[str]] = None,
+        name: Optional[str] = None,
+        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStore:
+        """Creates a vector store and polls.
+
+        :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+        :type body: JSON or IO[bytes]
+        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
+         ``file_search`` that can access files. Default value is None.
+        :paramtype file_ids: list[str]
+        :keyword name: The name of the vector store. Default value is None.
+        :paramtype name: str
+        :keyword expires_after: Details on when this vector store expires. Default value is None.
+        :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStore. The VectorStore is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStore
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+        if body is not None:
+            vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs)
+        elif file_ids is not None or (name is not None and expires_after is not None):
+            vector_store = self.create_vector_store(
+                content_type=content_type,
+                file_ids=file_ids,
+                name=name,
+                expires_after=expires_after,
+                chunking_strategy=chunking_strategy,
+                metadata=metadata,
+                **kwargs,
+            )
+        else:
+            raise ValueError(
+                "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
+                "'file_ids', or 'name' and 'expires_after'."
+            )
+
+        while vector_store.status == "in_progress":
+            time.sleep(sleep_interval)
+            vector_store = self.get_vector_store(vector_store.id)
+
+        return vector_store
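+
+    # Illustrative sketch (hedged): creating a vector store over previously uploaded
+    # files and blocking until indexing finishes; `uploaded` and the store name are
+    # placeholders:
+    #
+    #     vector_store = project_client.agents.create_vector_store_and_poll(
+    #         file_ids=[uploaded.id], name="my-store", sleep_interval=2
+    #     )
+    #     print(vector_store.status)  # no longer "in_progress" once polling returns
+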
+
+    @overload
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        body: JSON,
+        *,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Creates a vector store file batch and polls.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        file_ids: List[str],
+        content_type: str = "application/json",
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Creates a vector store file batch and polls.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :keyword file_ids: List of file identifiers. Required.
+        :paramtype file_ids: list[str]
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        body: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Creates a vector store file batch and polls.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+         is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store_file_batch_and_poll(
+        self,
+        vector_store_id: str,
+        body: Optional[Union[JSON, IO[bytes]]] = None,
+        *,
+        file_ids: List[str] = _Unset,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFileBatch:
+        """Creates a vector store file batch and polls.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+        :type body: JSON or IO[bytes]
+        :keyword file_ids: List of file identifiers. Required.
+        :paramtype file_ids: list[str]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+         use the auto strategy. Default value is None.
+
+        if body is None:
+            vector_store_file_batch = super().create_vector_store_file_batch(
+                vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
+            )
+        else:
+            content_type = kwargs.get("content_type", "application/json")
+            vector_store_file_batch = super().create_vector_store_file_batch(
+                body=body, content_type=content_type, **kwargs
+            )
+
+        while vector_store_file_batch.status == "in_progress":
+            time.sleep(sleep_interval)
+            vector_store_file_batch = super().get_vector_store_file_batch(
+                vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
+            )
+
+        return vector_store_file_batch
+
+
+__all__: List[str] = [
+    "AgentsOperations",
+    "ConnectionsOperations",
+    "InferenceOperations",
+]  # Add all objects you want publicly available to users at this package level
+
+
+def patch_sdk():
+    """Do not remove from this file.
+
+    `patch_sdk` is a last resort escape hatch that allows you to do customizations
+    you can't accomplish using the techniques described in
+    https://aka.ms/azsdk/python/dpcodegen/python/customize
+    """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/py.typed b/sdk/ai/azure-ai-projects/azure/ai/projects/py.typed
new file mode 100644
index 000000000000..e5aff4f83af8
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt
new file mode 100644
index 000000000000..c82827bb56f4
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/dev_requirements.txt
@@ -0,0 +1,4 @@
+-e ../../../tools/azure-sdk-tools
+../../core/azure-core
+../../identity/azure-identity
+aiohttp
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py
new file mode 100644
index 000000000000..69c794672e78
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py
@@ -0,0 +1,76 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_basics_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
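+
+    # A minimal sanity check (illustrative, not required by the service): fail
+    # fast with a clear message when the connection string is not configured.
+    if "PROJECT_CONNECTION_STRING" not in os.environ:
+        raise EnvironmentError("Set the PROJECT_CONNECTION_STRING environment variable before running this sample.")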
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+        # poll the run as long as the run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second without blocking the event loop
+            await asyncio.sleep(1)
+            run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            print(f"Run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py
new file mode 100644
index 000000000000..67bdf21fff42
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py
@@ -0,0 +1,117 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with custom functions from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_functions_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
+from azure.identity.aio import DefaultAzureCredential
+
+from user_async_functions import user_async_functions
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
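+
+    # user_async_functions (imported above) maps function names to Python callables;
+    # AsyncFunctionTool builds the tool definitions the agent sees from those
+    # callables and dispatches the model's tool calls back to them.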
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        # Initialize assistant functions
+        functions = AsyncFunctionTool(functions=user_async_functions)
+
+        # Create agent
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=functions.definitions,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        # Create thread for communication
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, ID: {thread.id}")
+
+        # Create and send message
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, what's the time?"
+        )
+        print(f"Created message, ID: {message.id}")
+
+        # Create and run assistant task
+        run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, ID: {run.id}")
+
+        # Polling loop for run status
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            await asyncio.sleep(4)
+            run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls
+                if not tool_calls:
+                    print("No tool calls provided - cancelling run")
+                    await project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                tool_outputs = []
+                for tool_call in tool_calls:
+                    if isinstance(tool_call, RequiredFunctionToolCall):
+                        try:
+                            output = await functions.execute(tool_call)
+                            tool_outputs.append(
+                                {
+                                    "tool_call_id": tool_call.id,
+                                    "output": output,
+                                }
+                            )
+                        except Exception as e:
+                            print(f"Error executing tool_call {tool_call.id}: {e}")
+
+                print(f"Tool outputs: {tool_outputs}")
+                if tool_outputs:
+                    await project_client.agents.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print(f"Current run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        # Delete the agent when done
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        # Fetch and log all messages
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
new file mode 100644
index 000000000000..475873498017
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
@@ -0,0 +1,96 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler in streaming from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+from typing import Any
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.projects.models import AsyncAgentEventHandler
+from azure.identity.aio import DefaultAzureCredential
+
+
+class MyEventHandler(AsyncAgentEventHandler):
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
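+
+    # Each async callback on MyEventHandler above corresponds to one streamed event
+    # type; anything the handler does not recognize is routed to on_unhandled_event.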
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID {message.id}")
+
+        async with await project_client.agents.create_stream(
+            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+        ) as stream:
+            await stream.until_done()
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
new file mode 100644
index 000000000000..bbbdab530f1b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
@@ -0,0 +1,111 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_with_toolset_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler and toolset from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_toolset_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+from typing import Any
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.projects.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet
+from azure.identity.aio import DefaultAzureCredential
+
+from user_async_functions import user_async_functions
+
+
+class MyEventHandler(AsyncAgentEventHandler):
+
+    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    async def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    async def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+    async def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    async def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    async def on_done(self) -> None:
+        print("Stream completed.")
+
+    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    # Initialize toolset with user functions
+    functions = AsyncFunctionTool(user_async_functions)
+    toolset = AsyncToolSet()
+    toolset.add(functions)
+
+    async with project_client:
+
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id,
+            role="user",
+            content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
+        )
+        print(f"Created message, message ID {message.id}")
+
+        async with await project_client.agents.create_stream(
+            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+        ) as stream:
+            await stream.until_done()
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py
new file mode 100644
index 000000000000..ab7799bb5c31
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with iteration in streaming from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import AgentStreamEvent
+from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
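+
+    # Iterating the stream yields (event_type, event_data) tuples, so the loop
+    # below can branch on either the parsed model type or the raw event name.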
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID {message.id}")
+
+        async with await project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
+            async for event_type, event_data in stream:
+
+                if isinstance(event_data, MessageDeltaChunk):
+                    for content_part in event_data.delta.content:
+                        if isinstance(content_part, MessageDeltaTextContent):
+                            text_value = content_part.text.value if content_part.text else "No text"
+                            print(f"Text delta received: {text_value}")
+
+                elif isinstance(event_data, ThreadMessage):
+                    print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+                elif isinstance(event_data, ThreadRun):
+                    print(f"ThreadRun status: {event_data.status}")
+
+                elif isinstance(event_data, RunStep):
+                    print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+                elif event_type == AgentStreamEvent.ERROR:
+                    print(f"An error occurred. Data: {event_data}")
+
+                elif event_type == AgentStreamEvent.DONE:
+                    print("Stream completed.")
+                    break
+
+                else:
+                    print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
new file mode 100644
index 000000000000..eb04c0e98673
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
@@ -0,0 +1,94 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_vector_store_batch_file_search_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store
+    and perform search from the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" + +import asyncio +import os +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import FileSearchTool, FilePurpose +from azure.identity import DefaultAzureCredential + + +async def main(): + # Create an Azure AI Client from a connection string, copied from your AI Studio project. + # At the moment, it should be in the format ";;;" + # Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) + + async with project_client: + + # upload a file and wait for it to be processed + file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # create a vector store with no file and wait for it to be processed + vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # add the file to the vector store or you can supply file ids in the vector store creation + vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, file_ids=[file.id] + ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") + + # create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + agent = await project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await project_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + await project_client.agents.delete_file(file.id) + print("Deleted file") + + await project_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + await project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = await project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py new file mode 100644 index 000000000000..4686f3f9fc29 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -0,0 +1,83 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_agents_with_file_search_attachment_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to create messages with file search attachments from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_with_file_search_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import FilePurpose
+from azure.ai.projects.models import FileSearchTool, MessageAttachment
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        # upload a file and wait for it to be processed
+        file = await project_client.agents.upload_file_and_poll(
+            file_path="../product_info_1.md", purpose=FilePurpose.AGENTS
+        )
+
+        # Create an agent; the file search capability is supplied per message via the attachment below
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        # Create a message with the file search attachment.
+        # Note that a temporary vector store is created for the attachment, with a default expiration policy of seven days.
+        attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(
+            thread_id=thread.id, assistant_id=agent.id, sleep_interval=4
+        )
+        print(f"Created run, run ID: {run.id}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await project_client.agents.delete_file(file.id)
+        print("Deleted file")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py
new file mode 100644
index 000000000000..4931352e03c6
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py
@@ -0,0 +1,29 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import asyncio
+import os
+import sys
+
+
+# Add the parent directory to sys.path so user_functions can be imported
+current_dir = os.path.dirname(os.path.abspath(__file__))
+parent_dir = os.path.abspath(os.path.join(current_dir, ".."))
+if parent_dir not in sys.path:
+    sys.path.insert(0, parent_dir)
+from user_functions import fetch_current_datetime, fetch_weather, send_email
+
+
+async def send_email_async(recipient: str, subject: str, body: str) -> str:
+    await asyncio.sleep(1)
+    return send_email(recipient, subject, body)
+
+
+# Statically defined user functions for fast reference, with send_email as async and the rest as sync
+user_async_functions = {
+    "fetch_current_datetime": fetch_current_datetime,
+    "fetch_weather": fetch_weather,
+    "send_email": send_email_async,
+}
diff --git a/sdk/ai/azure-ai-projects/samples/agents/product_info_1.md b/sdk/ai/azure-ai-projects/samples/agents/product_info_1.md
new file mode 100644
index 000000000000..041155831d53
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/product_info_1.md
@@ -0,0 +1,51 @@
+# Information about product item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6. Safety Tips
+Safety guidelines for public and private spaces
+
+### 7. Troubleshooting
+Quick fixes for common issues
+
+## Warranty Information
+Two-year limited warranty on all electronic components
+
+## Contact Information
+Customer Support at support@contoso-galaxy-innovations.com
+
+## Return Policy
+30-day return policy with no questions asked
+
+## FAQ
+- How to sync your SmartView Glasses with your devices
+- Troubleshooting connection issues
+- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py
new file mode 100644
index 000000000000..66b14bfc2af3
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py
@@ -0,0 +1,63 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_basics.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os, time
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
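+
+# Note: the polling loop below only re-queries while the run is still "queued",
+# "in_progress" or "requires_action"; any other status (e.g. "completed",
+# "failed", "cancelled", "expired") is terminal and ends the loop.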
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+    # poll the run as long as the run status is queued or in progress
+    while run.status in ["queued", "in_progress", "requires_action"]:
+        # wait for a second
+        time.sleep(1)
+        run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+        print(f"Run status: {run.status}")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py
new file mode 100644
index 000000000000..409846279762
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py
@@ -0,0 +1,80 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_code_interpreter_attachment.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the code interpreter tool from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import CodeInterpreterTool
+from azure.ai.projects.models import FilePurpose
+from azure.ai.projects.models import MessageAttachment
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+    # upload a file and wait for it to be processed
+    file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    code_interpreter = CodeInterpreterTool()
+
+    # note that the CodeInterpreterTool must be enabled in the agent creation,
+    # otherwise the agent will not be able to see the file attachment
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=code_interpreter.definitions,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    # create a message with the attachment
+    attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # If the error is "Rate limit is exceeded.", you need to request more quota
+        print(f"Run failed: {run.last_error}")
+
+    project_client.agents.delete_file(file.id)
+    print("Deleted file")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py
new file mode 100644
index 000000000000..30776b1ebac6
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py
@@ -0,0 +1,87 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with file searching from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import FileSearchTool
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
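+
+# FileSearchTool itself only carries configuration: .definitions declares the tool
+# on the agent, and .resources points the agent at the vector store IDs created below.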
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+
+    openai_file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+    print(f"Uploaded file, file ID: {openai_file.id}")
+
+    openai_vectorstore = project_client.agents.create_vector_store_and_poll(
+        file_ids=[openai_file.id], name="my_vectorstore"
+    )
+    print(f"Created vector store, vector store ID: {openai_vectorstore.id}")
+
+    # Create file search tool with resources
+    file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id])
+
+    # Create agent with file search tool and process assistant run
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="Hello, you are a helpful assistant who can search information from uploaded files",
+        tools=file_search.definitions,
+        tool_resources=file_search.resources,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    # Create thread for communication
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?"
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process assistant run in thread with tools
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        # If the error is "Rate limit is exceeded.", you need to request more quota
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the vector store when done
+    project_client.agents.delete_vector_store(openai_vectorstore.id)
+    print("Deleted vector store")
+
+    # Delete the agent when done
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py
new file mode 100644
index 000000000000..401b0403ea95
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py
@@ -0,0 +1,105 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with custom functions from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import os, time +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +# Initialize function tool with user functions +functions = FunctionTool(functions=user_functions) + +with project_client: + # Create an agent and run user's request with function calls + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + project_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py new file mode 100644 index 000000000000..a684b367db44 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_run_with_toolset.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_run_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import FunctionTool, ToolSet, CodeInterpreterTool
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+# Initialize agent toolset with user functions and code interpreter
+functions = FunctionTool(user_functions)
+code_interpreter = CodeInterpreterTool()
+
+toolset = ToolSet()
+toolset.add(functions)
+toolset.add(code_interpreter)
+
+# Create agent with toolset and process assistant run
+with project_client:
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    # Create thread for communication
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process agent run in thread with tools
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the agent when done
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py
new file mode 100644
index 000000000000..a59d4e322c6b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py
@@ -0,0 +1,98 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +from azure.ai.projects.models import ( + AgentEventHandler, + MessageDeltaTextContent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) + +from typing import Any + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + + +class MyEventHandler(AgentEventHandler): + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") + + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") + + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + + +with project_client: + # Create an agent and run stream with event handler + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with project_client.agents.create_stream( + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py new file mode 100644 index 000000000000..713ceca95243 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -0,0 +1,132 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_functions.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler and toolset from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_functions.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.projects.models import AgentEventHandler
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
+
+from typing import Any
+
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+
+class MyEventHandler(AgentEventHandler):
+
+    def __init__(self, functions: FunctionTool) -> None:
+        self.functions = functions
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
+            tool_calls = run.required_action.submit_tool_outputs.tool_calls
+
+            tool_outputs = []
+            for tool_call in tool_calls:
+                if isinstance(tool_call, RequiredFunctionToolCall):
+                    try:
+                        output = self.functions.execute(tool_call)
+                        tool_outputs.append(
+                            {
+                                "tool_call_id": tool_call.id,
+                                "output": output,
+                            }
+                        )
+                    except Exception as e:
+                        print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                with project_client.agents.submit_tool_outputs_to_stream(
+                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
+                ) as stream:
+                    stream.until_done()
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with project_client:
+    functions = FunctionTool(user_functions)
+
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=functions.definitions,
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.",
+    )
+    print(f"Created message, message ID {message.id}")
+
+    with project_client.agents.create_stream(
+        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler(functions)
+    ) as stream:
+        stream.until_done()
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
new file mode 100644
index 000000000000..41ab2ba587f5
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py
@@ -0,0 +1,109 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_eventhandler_with_toolset.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with an event handler and toolset from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from typing import Any
+
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
+from azure.ai.projects.models import AgentEventHandler, FunctionTool, ToolSet
+from azure.identity import DefaultAzureCredential
+
+from user_functions import user_functions
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+
+# When using FunctionTool with ToolSet in agent creation, the tool call events are
+# handled inside the create_stream method, and the functions are called automatically by default.
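+# (By contrast, sample_agents_stream_eventhandler_with_functions.py shows the manual
+# path, where the handler submits tool outputs itself from on_thread_run.)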
+class MyEventHandler(AgentEventHandler):
+
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+        if run.status == "failed":
+            print(f"Run failed. Error: {run.last_error}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+
+with project_client:
+    functions = FunctionTool(user_functions)
+    toolset = ToolSet()
+    toolset.add(functions)
+
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="Hello, send an email with the datetime and weather information in New York. Also let me know the details.",
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    with project_client.agents.create_stream(
+        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+    ) as stream:
+        stream.until_done()
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py
new file mode 100644
index 000000000000..814685dd1379
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import (
+    AgentStreamEvent,
+    MessageDeltaTextContent,
+    MessageDeltaChunk,
+    ThreadMessage,
+    ThreadRun,
+    RunStep,
+)
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<Endpoint>;<SubscriptionId>;<ResourceGroupName>;<WorkspaceName>".
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables.
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    # Create an agent and run a stream with iteration
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+    print(f"Created message, message ID: {message.id}")
+
+    with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
+
+        for event_type, event_data in stream:
+
+            if isinstance(event_data, MessageDeltaChunk):
+                for content_part in event_data.delta.content:
+                    if isinstance(content_part, MessageDeltaTextContent):
+                        text_value = content_part.text.value if content_part.text else "No text"
+                        print(f"Text delta received: {text_value}")
+
+            elif isinstance(event_data, ThreadMessage):
+                print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
+
+            elif isinstance(event_data, ThreadRun):
+                print(f"ThreadRun status: {event_data.status}")
+
+            elif isinstance(event_data, RunStep):
+                print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
+
+            elif event_type == AgentStreamEvent.ERROR:
+                print(f"An error occurred. Data: {event_data}")
+
+            elif event_type == AgentStreamEvent.DONE:
+                print("Stream completed.")
+                break
+
+            else:
+                print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py
new file mode 100644
index 000000000000..3337f28954f2
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py
@@ -0,0 +1,122 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_stream_iteration_with_toolset.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with a toolset and iteration in streaming from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_stream_iteration_with_toolset.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import AgentStreamEvent +from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun +from azure.ai.projects.models import FunctionTool, ToolSet +from azure.ai.projects.operations import AgentsOperations +from azure.identity import DefaultAzureCredential +from user_functions import user_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + + +# Function to handle tool stream iteration +def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): + try: + with operations.submit_tool_outputs_to_stream( + thread_id=thread_id, + run_id=run_id, + tool_outputs=tool_outputs, + ) as tool_stream: + for tool_event_type, tool_event_data in tool_stream: + if tool_event_type == AgentStreamEvent.ERROR: + print(f"An error occurred in tool stream. Data: {tool_event_data}") + elif tool_event_type == AgentStreamEvent.DONE: + print("Tool stream completed.") + break + else: + if isinstance(tool_event_data, MessageDeltaChunk): + handle_message_delta(tool_event_data) + + except Exception as e: + print(f"Failed to process tool stream: {e}") + + +# Function to handle message delta chunks +def handle_message_delta(delta: MessageDeltaChunk) -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") + + +functions = FunctionTool(user_functions) +toolset = ToolSet() +toolset.add(functions) + +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") + print(f"Created message, message ID {message.id}") + + with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: + + for event_type, event_data in stream: + + if isinstance(event_data, MessageDeltaChunk): + handle_message_delta(event_data) + + elif isinstance(event_data, ThreadMessage): + print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") + + elif isinstance(event_data, ThreadRun): + print(f"ThreadRun status: {event_data.status}") + + if event_data.status == "failed": + print(f"Run failed. Error: {event_data.last_error}") + + elif isinstance(event_data, RunStep): + print(f"RunStep type: {event_data.type}, Status: {event_data.status}") + + elif event_type == AgentStreamEvent.ERROR: + print(f"An error occurred. 
+
+            elif event_type == AgentStreamEvent.DONE:
+                print("Stream completed.")
+                break
+
+            else:
+                print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py
new file mode 100644
index 000000000000..c26ce41cbebe
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_vector_store_batch_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_vector_store_batch_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<Endpoint>;<SubscriptionId>;<ResourceGroupName>;<WorkspaceName>".
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables.
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+
+    # Upload a file and wait for it to be processed
+    file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # Create a vector store with no files and wait for it to be processed
+    vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    # Add the file to the vector store (file IDs can also be supplied at vector store creation)
+    vector_store_file_batch = project_client.agents.create_vector_store_file_batch_and_poll(
+        vector_store_id=vector_store.id, file_ids=[file.id]
+    )
+    print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+    # Create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # Note that both the FileSearchTool definitions and resources must be added to the agent,
+    # or it will be unable to search the file
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + project_client.agents.delete_file(file.id) + print("Deleted file") + + project_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py new file mode 100644 index 000000000000..a318c2b51c4f --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -0,0 +1,75 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_with_file_search_attachment.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to create messages with file search attachments from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_with_file_search_attachment.py + + Before running the sample: + + pip install azure.ai.projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import FilePurpose +from azure.ai.projects.models import MessageAttachment +from azure.ai.projects.models import FileSearchTool +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # upload a file and wait for it to be processed + file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) + print(f"Uploaded file, file ID: {file.id}") + + # Create agent with file search tool + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # Create a message with the file search attachment + # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
+    attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, run ID: {run.id}")
+
+    project_client.agents.delete_file(file.id)
+    print("Deleted file")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py
new file mode 100644
index 000000000000..8072b1b8a944
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py
@@ -0,0 +1,65 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import json
+import datetime
+
+# These are the user-defined functions that can be called by the agent.
+
+
+def fetch_current_datetime() -> str:
+    """
+    Get the current time as a JSON string.
+
+    :return: The current time in JSON format.
+    :rtype: str
+    """
+    current_time = datetime.datetime.now()
+    time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")})
+    return time_json
+
+
+def fetch_weather(location: str) -> str:
+    """
+    Fetches the weather information for the specified location.
+
+    :param str location: The location to fetch weather for.
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
+    mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"}
+    weather = mock_weather_data.get(location, "Weather data not available for this location.")
+    weather_json = json.dumps({"weather": weather})
+    return weather_json
+
+
+def send_email(recipient: str, subject: str, body: str) -> str:
+    """
+    Sends an email with the specified subject and body to the recipient.
+
+    :param str recipient: Email address of the recipient.
+    :param str subject: Subject of the email.
+    :param str body: Body content of the email.
+    :return: Confirmation message.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd use an SMTP server or an email service API.
+    # Here, we'll mock the email sending.
+    print(f"Sending email to {recipient}...")
+    print(f"Subject: {subject}")
+    print(f"Body:\n{body}")
+
+    message_json = json.dumps({"message": f"Email successfully sent to {recipient}."})
+    return message_json
+
+
+# Statically defined user functions for fast reference
+user_functions = {
+    "fetch_current_datetime": fetch_current_datetime,
+    "fetch_weather": fetch_weather,
+    "send_email": send_email,
+}
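+
+# To expose an additional function to the agent, define it above with a docstring that
+# describes its parameters (FunctionTool derives the tool definition from it), then
+# register it here. A sketch, using a hypothetical helper named convert_temperature:
+#
+#     user_functions["convert_temperature"] = convert_temperature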
diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py
new file mode 100644
index 000000000000..122c166b3748
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py
@@ -0,0 +1,142 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_connections_async.py
+
+DESCRIPTION:
+    Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections
+    and get connection properties.
+
+USAGE:
+    python sample_connections_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects aiohttp azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview"
+       tab in your AI Studio Project page.
+    2) CONNECTION_NAME - the name of a Serverless or Azure OpenAI connection, as found in the "Connections" tab
+       in your AI Studio Hub page.
+"""
+
+import asyncio
+import os
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import ConnectionType, AuthenticationType
+from azure.identity import DefaultAzureCredential
+
+
+async def sample_connections_async():
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    )
+
+    async with project_client:
+
+        # List the properties of all connections
+        connections = await project_client.connections.list()
+        print(f"====> Listing of all connections (found {len(connections)}):")
+        for connection in connections:
+            print(connection)
+
+        # List the properties of all connections of a particular "type" (in this sample, Azure OpenAI connections)
+        connections = await project_client.connections.list(
+            connection_type=ConnectionType.AZURE_OPEN_AI,
+        )
+        print(f"====> Listing of all Azure OpenAI connections (found {len(connections)}):")
+        for connection in connections:
+            print(connection)
+
+        # Get the properties of the default connection of a particular "type", with credentials
+        connection = await project_client.connections.get_default(
+            connection_type=ConnectionType.AZURE_OPEN_AI,
+            with_credentials=True,  # Optional. Defaults to "False"
+        )
+        print("====> Get default Azure OpenAI connection:")
+        print(connection)
+
+        # Get the properties of a connection by connection name:
+        connection = await project_client.connections.get(
+            connection_name=os.environ["CONNECTION_NAME"],
+            with_credentials=True,  # Optional. Defaults to "False"
+        )
+        print("====> Get connection by name:")
+        print(connection)
+
+        # Examples of how you would create an inference client
+        if connection.connection_type == ConnectionType.AZURE_OPEN_AI:
+
+            from openai import AsyncAzureOpenAI
+
+            if connection.authentication_type == AuthenticationType.API_KEY:
+                print("====> Creating AzureOpenAI client using API key authentication")
+                client = AsyncAzureOpenAI(
+                    api_key=connection.key,
+                    azure_endpoint=connection.endpoint_url,
+                    api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+                )
+            elif connection.authentication_type == AuthenticationType.AAD:
+                print("====> Creating AzureOpenAI client using Entra ID authentication")
+                from azure.identity import get_bearer_token_provider
+
+                client = AsyncAzureOpenAI(
+                    # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+                    azure_ad_token_provider=get_bearer_token_provider(
+                        connection.token_credential, "https://cognitiveservices.azure.com/.default"
+                    ),
+                    azure_endpoint=connection.endpoint_url,
+                    api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+                )
+            else:
+                raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+            response = await client.chat.completions.create(
+                model="gpt-4o",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "How many feet are in a mile?",
+                    },
+                ],
+            )
+            print(response.choices[0].message.content)
+
+        elif connection.connection_type == ConnectionType.SERVERLESS:
+
+            from azure.ai.inference.aio import ChatCompletionsClient
+            from azure.ai.inference.models import UserMessage
+
+            if connection.authentication_type == AuthenticationType.API_KEY:
+                print("====> Creating ChatCompletionsClient using API key authentication")
+                from azure.core.credentials import AzureKeyCredential
+
+                client = ChatCompletionsClient(
+                    endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+                )
+            elif connection.authentication_type == AuthenticationType.AAD:
+                # MaaS models do not yet support Entra ID auth
+                print("====> Creating ChatCompletionsClient using Entra ID authentication")
+                client = ChatCompletionsClient(
+                    endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+                )
+            else:
+                raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+            response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+            await client.close()
+            print(response.choices[0].message.content)
+
+
+async def main():
+    await sample_connections_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py
new file mode 100644
index 000000000000..50d7fe8da1c1
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py
@@ -0,0 +1,123 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_connections.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to enumerate connections
+    and get connection properties.
+
+USAGE:
+    python sample_connections.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview"
+       tab in your AI Studio Project page.
+    2) CONNECTION_NAME - the name of a Serverless or Azure OpenAI connection, as found in the "Connections" tab
+       in your AI Studio Hub page.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import ConnectionType, AuthenticationType
+from openai import AzureOpenAI
+from azure.ai.inference import ChatCompletionsClient
+from azure.ai.inference.models import UserMessage
+from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+from azure.core.credentials import AzureKeyCredential
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+with project_client:
+    # List the properties of all connections
+    connections = project_client.connections.list()
+    print(f"====> Listing of all connections (found {len(connections)}):")
+    for connection in connections:
+        print(connection)
+
+    # List the properties of all connections of a particular "type" (in this sample, Azure OpenAI connections)
+    connections = project_client.connections.list(
+        connection_type=ConnectionType.AZURE_OPEN_AI,
+    )
+    print(f"====> Listing of all Azure OpenAI connections (found {len(connections)}):")
+    for connection in connections:
+        print(connection)
+
+    # Get the properties of the default connection of a particular "type", with credentials
+    connection = project_client.connections.get_default(
+        connection_type=ConnectionType.AZURE_OPEN_AI,
+        with_credentials=True,  # Optional. Defaults to "False"
+    )
+    print("====> Get default Azure OpenAI connection:")
+    print(connection)
+
+    # Get the properties of a connection by connection name:
+    connection = project_client.connections.get(
+        connection_name=os.environ["CONNECTION_NAME"],
+        with_credentials=True,  # Optional. Defaults to "False"
+    )
+    print("====> Get connection by name:")
+    print(connection)
+
+
+# Examples of how you would create an inference client
+if connection.connection_type == ConnectionType.AZURE_OPEN_AI:
+
+    if connection.authentication_type == AuthenticationType.API_KEY:
+        print("====> Creating AzureOpenAI client using API key authentication")
+        client = AzureOpenAI(
+            api_key=connection.key,
+            azure_endpoint=connection.endpoint_url,
+            api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        )
+    elif connection.authentication_type == AuthenticationType.AAD:
+        print("====> Creating AzureOpenAI client using Entra ID authentication")
+        client = AzureOpenAI(
+            # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
+            azure_ad_token_provider=get_bearer_token_provider(
+                connection.token_credential, "https://cognitiveservices.azure.com/.default"
+            ),
+            azure_endpoint=connection.endpoint_url,
+            api_version="2024-06-01",  # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+        )
+    else:
+        raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {
+                "role": "user",
+                "content": "How many feet are in a mile?",
+            },
+        ],
+    )
+    client.close()
+    print(response.choices[0].message.content)
+
+elif connection.connection_type == ConnectionType.SERVERLESS:
+
+    if connection.authentication_type == AuthenticationType.API_KEY:
+        print("====> Creating ChatCompletionsClient using API key authentication")
+        client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
+    elif connection.authentication_type == AuthenticationType.AAD:
+        # MaaS models do not yet support Entra ID auth
+        print("====> Creating ChatCompletionsClient using Entra ID authentication")
+        client = ChatCompletionsClient(
+            endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+        )
+    else:
+        raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
+
+    response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+    client.close()
+    print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl
new file mode 100644
index 000000000000..0396a22c2db6
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl
@@ -0,0 +1,3 @@
+{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."}
+{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."}
+{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."}
diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py
new file mode 100644
index 000000000000..5d351d2f7810
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_evaluations.py
+
+DESCRIPTION:
+    This sample demonstrates how to create and run a remote evaluation of a dataset
+    from the Azure AI Project service using a synchronous client.
+
+USAGE:
+    python sample_evaluations.py
+
+    Before running the sample:
+
+    pip install azure-identity
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/ai_project_utils#egg=azure-ai-client&subdirectory=sdk/ai/azure-ai-client"
+    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/demo_evaluators_id#egg=azure-ai-evaluation&subdirectory=sdk/evaluation/azure-ai-evaluation"
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
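+
+# The dataset rows are expected to carry the columns the chosen evaluators consume
+# (see evaluate_test_data.jsonl above: "query", "context", "response", "ground_truth").
+# If your column names differ, a per-evaluator mapping can be supplied, as sketched
+# below with a hypothetical "answer" column (following the column_mapping pattern used
+# in sample_evaluations_schedules.py):
+#
+#     EvaluatorConfiguration(
+#         id=F1ScoreEvaluator.evaluator_id,
+#         init_params={"column_mapping": {"response": "${data.answer}"}},
+#     )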
+
+import os, time
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType
+from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, HateUnfairnessEvaluator
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<Endpoint>;<SubscriptionId>;<ResourceGroupName>;<WorkspaceName>".
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables.
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+# Upload data for evaluation.
+# A service-side fix is needed to make this work:
+# data_id = project_client.upload_file("./evaluate_test_data.jsonl")
+data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3"
+
+default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
+
+
+# Define an evaluation
+evaluation = Evaluation(
+    display_name="Remote Evaluation",
+    description="Evaluation of dataset",
+    data=Dataset(id=data_id),
+    evaluators={
+        "f1_score": EvaluatorConfiguration(
+            id=F1ScoreEvaluator.evaluator_id,
+        ),
+        "relevance": EvaluatorConfiguration(
+            id=RelevanceEvaluator.evaluator_id,
+            init_params={
+                "model_config": default_connection.to_evaluator_model_config(
+                    deployment_name="GPT-4-Prod", api_version="2024-08-01-preview"
+                )
+            },
+        ),
+        "hate_unfairness": EvaluatorConfiguration(
+            id=HateUnfairnessEvaluator.evaluator_id,
+            init_params={"azure_ai_project": project_client.scope},
+        ),
+    },
+    # This is needed as a workaround until the environment gets published to the registry
+    properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"},
)
+
+# Create the evaluation
+evaluation_response = project_client.evaluations.create(
+    evaluation=evaluation,
+)
+
+# Get the evaluation
+get_evaluation_response = project_client.evaluations.get(evaluation_response.id)
+
+print("----------------------------------------------------------------")
+print("Created evaluation, evaluation ID: ", get_evaluation_response.id)
+print("Evaluation status: ", get_evaluation_response.status)
+print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"])
+print("----------------------------------------------------------------")
diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py
new file mode 100644
index 000000000000..5f24696dcb19
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py
@@ -0,0 +1,75 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import (
+    ApplicationInsightsConfiguration,
+    EvaluatorConfiguration,
+    SamplingStrategy,
+    EvaluationSchedule,
+    CronTrigger,
+    RecurrenceTrigger,
+    Frequency,
+    RecurrenceSchedule,
+)
+
+
+def main():
+    # Project configuration (canary)
+    Subscription = "72c03bf3-4e69-41af-9532-dfcdc3eefef4"
+    ResourceGroup = "apeddau-rg-westus2"
+    Workspace = "apeddau-canay-ws-eastus2euap"
+    Endpoint = "eastus2euap.api.azureml.ms"
+
+    # Create an Azure AI client
+    ai_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}",
+        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
+    )
+
+    # Sample for creating an evaluation schedule with a recurrence trigger of daily frequency
+
+    app_insights_config = ApplicationInsightsConfiguration(
+        resource_id="/subscriptions/72c03bf3-4e69-41af-9532-dfcdc3eefef4/resourceGroups/apeddau-rg-centraluseuap/providers/Microsoft.insights/components/apeddauwscentr0026977484",
+        query='traces | where message contains ""',
+        service_name="sample_service_name",
+    )
+
+    f1_evaluator_config = EvaluatorConfiguration(
+        id="azureml://registries/model-evaluation-dev-01/models/F1ScoreEval/versions/1",
+        init_params={"column_mapping": {"response": "${data.message}", "ground_truth": "${data.itemType}"}},
+    )
+
+    recurrence_trigger = RecurrenceTrigger(frequency="daily", interval=1)
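+    # A cron-based trigger could be used instead (a sketch; this assumes the CronTrigger
+    # model imported above accepts a standard cron expression):
+    # recurrence_trigger = CronTrigger(expression="0 0 * * *")  # daily at midnight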
+    evaluators = {
+        "f1_score": f1_evaluator_config,
+    }
+
+    sampling_strategy = SamplingStrategy(rate=0.2)
+    name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104"
+    description = "Testing Online eval command job in CANARY environment"
+    tags = {"tag1": "value1", "tag2": "value2"}
+    properties = {
+        "Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1"
+    }
+
+    evaluation_schedule = EvaluationSchedule(
+        data=app_insights_config,
+        evaluators=evaluators,
+        trigger=recurrence_trigger,
+        sampling_strategy=sampling_strategy,
+        description=description,
+        tags=tags,
+        properties=properties,
+    )
+
+    evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule)
+    print(evaluation_schedule.provisioning_status)
+    print(evaluation_schedule)
+
+    # Sample for getting an evaluation schedule by name
+    evaluation_schedule = ai_client.evaluations.get_schedule(name)
+    print(evaluation_schedule)
+
+    # Sample for listing evaluation schedules
+    for evaluation_schedule in ai_client.evaluations.list_schedule():
+        print(evaluation_schedule)
+
+    # Sample for deleting an evaluation schedule by name
+    ai_client.evaluations.delete_schedule(name)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py
new file mode 100644
index 000000000000..874f789c16f0
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py
@@ -0,0 +1,57 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_azure_openai_client_async.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    AsyncAzureOpenAI client from the openai package.
+
+USAGE:
+    python sample_get_azure_openai_client_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects aiohttp openai
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import os
+import asyncio
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+
+async def sample_get_azure_openai_client_async():
+
+    async with AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(),
+        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    ) as project_client:
+
+        # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection:
+        async with await project_client.inference.get_azure_openai_client() as client:
+
+            response = await client.chat.completions.create(
+                model="gpt-4-0613",
+                messages=[
+                    {
+                        "role": "user",
+                        "content": "How many feet are in a mile?",
+                    },
+                ],
+            )
+
+            print(response.choices[0].message.content)
+
+
+async def main():
+    await sample_get_azure_openai_client_async()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py
new file mode 100644
index 000000000000..27e50fd8359d
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py
@@ -0,0 +1,49 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_chat_completions_client_async.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    async ChatCompletionsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_chat_completions_client_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects aiohttp azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import os +import asyncio +from azure.ai.projects.aio import AIProjectClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + + +async def sample_get_chat_completions_client_async(): + + async with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as project_client: + + # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: + async with await project_client.inference.get_chat_completions_client() as client: + + response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + print(response.choices[0].message.content) + + +async def main(): + await sample_get_chat_completions_client_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py new file mode 100644 index 000000000000..c7154a4f6aaa --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py @@ -0,0 +1,54 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_get_embeddings_client_async.py + +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + async EmbeddingsClient from the azure.ai.inference package. + +USAGE: + python sample_get_embeddings_client_async.py + + Before running the sample: + + pip install azure.ai.projects aiohttp azure-identity + + Set this environment variable with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" +import asyncio +import os +from azure.ai.projects.aio import AIProjectClient +from azure.identity import DefaultAzureCredential + + +async def sample_get_embeddings_client_async(): + + async with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) as project_client: + + # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: + async with await project_client.inference.get_embeddings_client() as client: + + response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) + + for item in response.data: + length = len(item.embedding) + print( + f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " + f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" + ) + + +async def main(): + await sample_get_embeddings_client_async() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py new file mode 100644 index 000000000000..b4a7e6939385 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py @@ -0,0 +1,45 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_get_azure_openai_client.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    AzureOpenAI client from the openai package.
+
+USAGE:
+    python sample_get_azure_openai_client.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects openai
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+with AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+) as project_client:

+    # Get an authenticated AzureOpenAI client for your default Azure OpenAI connection:
+    with project_client.inference.get_azure_openai_client() as client:
+
+        response = client.chat.completions.create(
+            model="gpt-4-0613",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "How many feet are in a mile?",
+                },
+            ],
+        )
+
+        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py
new file mode 100644
index 000000000000..16c3b40f7b45
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py
@@ -0,0 +1,38 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_chat_completions_client.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    ChatCompletionsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_chat_completions_client.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.inference.models import UserMessage
+from azure.identity import DefaultAzureCredential
+
+with AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+) as project_client:
+
+    # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection:
+    with project_client.inference.get_chat_completions_client() as client:
+
+        response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+
+        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py
new file mode 100644
index 000000000000..61ea0adaa289
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py
@@ -0,0 +1,42 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_get_embeddings_client.py
+
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    EmbeddingsClient from the azure.ai.inference package.
+
+USAGE:
+    python sample_get_embeddings_client.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+with AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+) as project_client:
+
+    # Get an authenticated azure.ai.inference EmbeddingsClient for your default Serverless connection:
+    with project_client.inference.get_embeddings_client() as client:
+
+        response = client.embed(input=["first phrase", "second phrase", "third phrase"])
+
+        for item in response.data:
+            length = len(item.embedding)
+            print(
+                f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
+                f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
+            )
diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py
new file mode 100644
index 000000000000..37a6290f3338
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/setup.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import os
+import re
+from setuptools import setup, find_packages
+
+
+PACKAGE_NAME = "azure-ai-projects"
+PACKAGE_PPRINT_NAME = "Azure AI Projects"
+
+# a-b-c => a/b/c
+package_folder_path = PACKAGE_NAME.replace("-", "/")
+
+# Version extraction inspired from 'requests'
+with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
+    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
+
+if not version:
+    raise RuntimeError("Cannot find version information")
+
+
+setup(
+    name=PACKAGE_NAME,
+    version=version,
+    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
+    long_description=open("README.md", "r").read(),
+    long_description_content_type="text/markdown",
+    license="MIT License",
+    author="Microsoft Corporation",
+    author_email="azpysdkhelp@microsoft.com",
+    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk",
+    keywords="azure, azure sdk",
+    classifiers=[
+        "Development Status :: 4 - Beta",
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 3 :: Only",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
+        "License :: OSI Approved :: MIT License",
+    ],
+    zip_safe=False,
+    packages=find_packages(
+        exclude=[
+            "tests",
+            # Exclude packages that will be covered by PEP420 or nspkg
+            "azure",
+            "azure.ai",
+        ]
+    ),
+    include_package_data=True,
+    package_data={
+        "azure.ai.projects": ["py.typed"],
+    },
+    install_requires=[
+        "isodate>=0.6.1",
+        "azure-core>=1.30.0",
+        "typing-extensions>=4.6.0",
+    ],
+    python_requires=">=3.8",
+)
diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md
new file mode 100644
index 000000000000..3fde15d5623b
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/README.md
@@ -0,0 +1,72 @@
+# Azure AI Project client library tests for Python
+
+The instructions below are for running tests locally, on a Windows machine, against the live service.
+
+## Build and install the client library
+
+- Clone or download this sample repository.
+- Open a command prompt window in the folder `sdk\ai\azure-ai-projects`.
+- If you want to run tests against the latest published client library, install it by running:
+   ```bash
+   pip install azure-ai-projects
+   ```
+- If you want to run tests against a locally built client library:
+    - First build the wheel:
+        ```bash
+        pip install wheel
+        pip install -r dev_requirements.txt
+        python setup.py bdist_wheel
+        ```
+    - Then install the resulting local wheel (update version `1.0.0b1` to the current one):
+        ```bash
+        pip install dist\azure_ai_projects-1.0.0b1-py3-none-any.whl --user --force-reinstall
+        ```
+
+## Setup for running tests in the `agents` folder
+
+```bash
+set PROJECT_CONNECTION_STRING_AGENTS_TESTS=<connection-string>
+```
+
+## Setup for running tests in the `evaluations` folder
+
+TODO
+
+## Setup for running tests in the `connections` and `inference` folders
+
+You need an Azure AI Project that has the following:
+
+TODO
+
+Copy the `Project connection string` from the Azure AI Studio and set the following environment variable:
+
+```bash
+set PROJECT_CONNECTION_STRING_CONNECTIONS_TESTS=<connection-string>
+```
+
+## Configure test proxy
+
+Configure the test proxy to run live service tests without recordings:
+
+```bash
+set AZURE_TEST_RUN_LIVE=true
+set AZURE_SKIP_LIVE_RECORDING=true
+set PROXY_URL=http://localhost:5000
+set AZURE_TEST_USE_CLI_AUTH=true
+```
+
+## Run tests
+
+To run all tests, type:
+
+```bash
+pytest
+```
+
+To run tests in a particular folder (`tests\connections` for example):
+
+```bash
+pytest tests\connections
+```
+
+## Additional information
+
+See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
new file mode 100644
index 000000000000..1c0367f2f417
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
@@ -0,0 +1,1119 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import os
+import json
+import time
+import functools
+import datetime
+import logging
+import sys
+
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
+from azure.core.pipeline.transport import RequestsTransport
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
+from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
+from azure.identity import DefaultAzureCredential
+
+# TODO: clean this up / get rid of anything not in use
+
+"""
+Issues I've noticed with the code:
+    delete_thread(thread.id) fails
+    cancel_thread(thread.id) expires/times out occasionally
+    added time.sleep() to the beginning of my last few tests to avoid rate limits
+    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
+"""
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    "azure_ai_project",
+    project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    'azure_ai_project',
+    azure_ai_project_host_name="https://foo.bar.some-domain.ms",
+    azure_ai_project_subscription_id="00000000-0000-0000-0000-000000000000",
+    azure_ai_project_resource_group_name="rg-resour-cegr-oupfoo1",
+    azure_ai_project_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+
+
+# Create a tool for agent use
+def fetch_current_datetime_live():
+    """
+    Get the current time as a JSON string.
+
+    :return: The current time in JSON format.
+    :rtype: str
+    """
+    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time_json = json.dumps({"current_time": current_datetime})
+    return time_json
+
+
+# Create a tool for agent use
+def fetch_current_datetime_recordings():
+    """
+    Get the current time as a JSON string.
+
+    :return: A static time string so that test recordings work.
+    :rtype: str
+    """
+    time_json = json.dumps({"current_time": "2024-10-10 12:30:19"})
+    return time_json
+
+
+# Statically defined user functions for fast reference
+user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings}
+user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live}
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestagentClient(AzureRecordedTestCase):
+
+    # helper function: create client using environment variables
+    def create_client(self, **kwargs):
+        # fetch environment variables
+        connection_string = kwargs.pop("project_connection_string_agents_tests")
+        credential = self.get_credential(AIProjectClient, is_async=False)
+
+        # create and return client
+        client = AIProjectClient.from_connection_string(
+            credential=credential,
+            connection=connection_string,
+        )
+
+        return client
+
+    # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list
+    """
+    # NOTE: this test should not be run against a shared resource, as it will delete all agents
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_clear_client(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # clear agent list
+        agents = client.agents.list_agents().data
+        for agent in agents:
+            client.agents.delete_agent(agent.id)
+        assert client.agents.list_agents().data.__len__() == 0
+
+        # close client
+        client.close()
+    """
+
+    # # **********************************************************************************
+    # #
+    # # UNIT TESTS
+    # #
+    # # **********************************************************************************
+
+    # # **********************************************************************************
+    # #
+    # # HAPPY PATH SERVICE TESTS - agent APIs
+    # #
+    # # **********************************************************************************
+
+    # test client creation
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_client(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # close client
+        client.close()
+
+    # test agent creation and deletion
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_delete_agent(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+        print("Created client")
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    # test agent creation with tools
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_agent_with_tools(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # initialize agent functions
+        functions = FunctionTool(functions=user_functions_recording)
+
+        # create agent with tools
+        agent = client.agents.create_agent(
+            model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions
+        )
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+        assert agent.tools
+        assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+        print("Tool successfully submitted:",
functions.definitions[0]["function"]["name"]) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + @agentClientPreparer() + @recorded_by_proxy + def test_update_agent(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + + # update agent and confirm changes went through + agent.update(name="my-agent2", instructions="You are helpful agent") + assert agent.name + assert agent.name == "my-agent2" + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + DISABLED: can't perform consistently on shared resource + @agentClientPreparer() + @recorded_by_proxy + def test_agent_list(self, **kwargs): + # create client and ensure there are no previous agents + client = self.create_client(**kwargs) + list_length = client.agents.list_agents().data.__len__() + + # create agent and check that it appears in the list + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent.id + + # create second agent and check that it appears in the list + agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent") + assert client.agents.list_agents().data.__len__() == list_length + 2 + assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id + + # delete agents and check list + client.agents.delete_agent(agent.id) + assert client.agents.list_agents().data.__len__() == list_length + 1 + assert client.agents.list_agents().data[0].id == agent2.id + + client.agents.delete_agent(agent2.id) + assert client.agents.list_agents().data.__len__() == list_length + print("Deleted agents") + + # close client + client.close() + """ + + # ********************************************************************************** + # + # HAPPY PATH SERVICE TESTS - Thread APIs + # + # ********************************************************************************** + + # test creating thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = client.agents.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + TODO what can I update a thread with? + # test updating thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + client.agents.update_thread(thread.id, ) # TODO what can we update it with? + assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + """ + # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working + # status_code = 404, response = + # error_map = {304: , 401: , 409: } + + # test deleting thread + @agentClientPreparer() + @recorded_by_proxy + def test_delete_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = client.agents.delete_thread(thread.id) + # assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test creating multiple messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_create_multiple_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing messages in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_list_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = client.agents.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() == 0 + + # create messages and check message list for each one + message1 = client.agents.create_message(thread_id=thread.id, 
role="user", content="Hello, tell me a joke") + assert message1.id + print("Created message, message ID", message1.id) + messages1 = client.agents.list_messages(thread_id=thread.id) + assert messages1.data.__len__() == 1 + assert messages1.data[0].id == message1.id + + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + assert message2.id + print("Created message, message ID", message2.id) + messages2 = client.agents.list_messages(thread_id=thread.id) + assert messages2.data.__len__() == 2 + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + assert message3.id + print("Created message, message ID", message3.id) + messages3 = client.agents.list_messages(thread_id=thread.id) + assert messages3.data.__len__() == 3 + assert ( + messages3.data[0].id == message3.id + or messages3.data[1].id == message2.id + or messages3.data[2].id == message2.id + ) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_get_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # get message + message2 = client.agents.get_message(thread_id=thread.id, message_id=message.id) + assert message2.id + assert message.id == message2.id + print("Got message, message ID", message.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + """ + TODO format the updated body + # test updating message in a thread + @agentClientPreparer() + @recorded_by_proxy + def test_update_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # update message + body_json = json.dumps # TODO format body into json -- figure out what the message looks like so I can update it (might be in that picture) + client.agents.update_message(thread_id=thread.id, message_id=message.id, body=) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Run APIs + # # + 
# # ********************************************************************************** + + # test creating run + @agentClientPreparer() + @recorded_by_proxy + def test_create_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting run + @agentClientPreparer() + @recorded_by_proxy + def test_get_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # get run + run2 = client.agents.get_run(thread_id=thread.id, run_id=run.id) + assert run2.id + assert run.id == run2.id + print("Got run, run ID", run2.id) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # TODO fix bc sometimes it works? and sometimes it doesnt? 
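+    # The status-polling loops in the tests below never time out. A helper like
+    # this sketch could bound them; it is illustrative only and is not called by
+    # the tests in this file. It assumes nothing beyond client.agents.get_run,
+    # which the tests already use; timeout_sec is an assumed parameter, not part
+    # of the service API.
+    def _wait_for_terminal_status(self, client, thread_id, run_id, timeout_sec=120):
+        # Poll once per second until the run leaves its non-terminal states,
+        # raising instead of looping forever if the service never settles.
+        deadline = time.time() + timeout_sec
+        run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            if time.time() >= deadline:
+                raise TimeoutError(f"Run {run_id} still '{run.status}' after {timeout_sec} seconds")
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
+        return run
+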
+    # test successful run status TODO test for cancelled/unsuccessful runs
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_run_status(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status
+        assert run.status in [
+            "queued",
+            "in_progress",
+            "requires_action",
+            "cancelling",
+            "cancelled",
+            "failed",
+            "completed",
+            "expired",
+        ]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Run status:", run.status)
+
+        assert run.status in ["cancelled", "failed", "completed", "expired"]
+        print("Run completed with status:", run.status)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    """
+    # TODO another, but check that the number of runs decreases after cancelling runs
+    # TODO can each thread only support one run?
+    # test listing runs
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_list_runs(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # check list for current runs
+        runs0 = client.agents.list_runs(thread_id=thread.id)
+        assert runs0.data.__len__() == 0
+
+        # create run and check list
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+        runs1 = client.agents.list_runs(thread_id=thread.id)
+        assert runs1.data.__len__() == 1
+        assert runs1.data[0].id == run.id
+
+        # create second run
+        run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run2.id
+        print("Created run, run ID", run2.id)
+        runs2 = client.agents.list_runs(thread_id=thread.id)
+        assert runs2.data.__len__() == 2
+        assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    """
+    # TODO figure out what to update the run with
+    # test updating run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_update_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread =
client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # update run + body = json.dumps({'todo': 'placeholder'}) + client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body) + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # test submitting tool outputs to run + @agentClientPreparer() + @recorded_by_proxy + def test_submit_tool_outputs_to_run(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # Initialize agent tools + functions = FunctionTool(user_functions_recording) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + # create agent + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset + ) + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + # check that tools are uploaded + assert run.tools + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + + # check status + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + + # check if tools are needed + if run.status == "requires_action" and run.required_action.submit_tool_outputs: + print("Requires action: submit tool outputs") + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print( + "No tool calls provided - cancelling run" + ) # TODO how can i make sure that it wants tools? should i have some kind of error message? 
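+                    # Without tool calls there is nothing to execute, so cancel the
+                    # run rather than leave it stuck in "requires_action".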
+                    client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                # submit tool outputs to run
+                tool_outputs = toolset.execute_tool_calls(tool_calls)  # TODO issue somewhere here
+                print("Tool outputs:", tool_outputs)
+                if tool_outputs:
+                    client.agents.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print("Current run status:", run.status)
+
+        print("Run completed with status:", run.status)
+
+        # check that messages used the tool
+        messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id)
+        tool_message = messages["data"][0]["content"][0]["text"]["value"]
+        hour12 = time.strftime("%I")
+        hour24 = time.strftime("%H")
+        minute = time.strftime("%M")
+        assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute in tool_message
+        print("Used tool_outputs")
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    """
+    # DISABLED: rewrite to ensure run is not complete when cancel_run is called
+    # test cancelling run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_cancel_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status and cancel
+        assert run.status in ["queued", "in_progress", "requires_action"]
+        client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+
+        while run.status in ["queued", "cancelling"]:
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Current run status:", run.status)
+        assert run.status == "cancelled"
+        print("Run cancelled")
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    # test create thread and run
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_thread_and_run(self, **kwargs):
+        time.sleep(26)
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread and run
+        run = client.agents.create_thread_and_run(assistant_id=agent.id)
+        assert run.id
+        assert run.thread_id
+        print("Created run, run ID", run.id)
+
+        # get thread
+        thread = client.agents.get_thread(run.thread_id)
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # check status
+        assert run.status in [
+            "queued",
+            "in_progress",
+            "requires_action",
+            "cancelling",
+            "cancelled",
+            "failed",
+            "completed",
+            "expired",
+        ]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            # assert run.status in
["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test listing run steps + @agentClientPreparer() + @recorded_by_proxy + def test_list_run_step(self, **kwargs): + + time.sleep(50) + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + assert message.id + print("Created message, message ID", message.id) + + # create run + run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id + print("Created run, run ID", run.id) + + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + # commenting assertion out below, do we know exactly when run starts? + # assert steps['data'].__len__() == 0 + + # check status + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = client.agents.get_run(thread_id=thread.id, run_id=run.id) + assert run.status in ["queued", "in_progress", "requires_action", "completed"] + print("Run status:", run.status) + steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + assert steps["data"].__len__() > 0 # TODO what else should we look at? + + assert run.status == "completed" + print("Run completed") + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + + # test getting run step + # TODO where are step ids from + @agentClientPreparer() + @recorded_by_proxy + def test_get_run_step(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
+        )
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        if run.status == "failed":
+            assert run.last_error
+            print(run.last_error)
+            print("FAILED HERE")
+
+        # check status
+        assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            if run.status == "failed":
+                assert run.last_error
+                print(run.last_error)
+                print("FAILED HERE")
+            assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+            print("Run status:", run.status)
+
+        # list steps, check that get_run_step works with first step_id
+        steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
+        assert steps["data"].__len__() > 0
+        step = steps["data"][0]
+        get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id)
+        assert step == get_step
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+
+    # # **********************************************************************************
+    # #
+    # # HAPPY PATH SERVICE TESTS - Streaming APIs
+    # #
+    # # **********************************************************************************
+
+    # # **********************************************************************************
+    # #
+    # # NEGATIVE TESTS - TODO decide what belongs here
+    # #
+    # # **********************************************************************************
+
+    """
+    # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors
+    # test agent creation and deletion
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_negative_create_delete_agent(self, **kwargs):
+        # create client using bad endpoint
+        bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
+
+        credential = self.get_credential(AIProjectClient, is_async=False)
+        client = AIProjectClient.from_connection_string(
+            credential=credential,
+            connection=bad_connection_string,
+        )
+
+        # attempt to create agent with bad client
+        exception_caught = False
+        try:
+            agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        # check for error (will not have a status code since it failed on request -- no response was received)
+        except (ServiceRequestError, HttpResponseError) as e:
+            exception_caught = True
+            if type(e) == ServiceRequestError:
+                assert e.message
+                assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
+            else:
+                assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e)
+
+        # close client and confirm an exception was caught
+        client.close()
+        assert exception_caught
+    """
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py
new file mode 100644
index 000000000000..d164d514f443
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py
@@ -0,0 +1,92 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import copy
+import datetime
+import pytest
+
+from azure.ai.projects.models._models import ThreadRun, RunStep, ThreadMessage
+from azure.ai.projects.models._patch import _safe_instantiate, _filter_parameters
+
+class TestDeserialization:
+    """Tests for deserialization of SSE responses."""
+
+    @pytest.mark.parametrize(
+        "valid_params,model_cls",
+        [
+            (
+                {
+                    "id": "12345",
+                    "object": "thread.run",
+                    "thread_id": "6789",
+                    "assistant_id": "101112",
+                    "status": "in_progress",
+                    "required_action": "test",
+                    "last_error": "none",
+                    "model": "gpt-4",
+                    "instructions": "Test instruction",
+                    "tools": "Test function",
+                    "created_at": datetime.datetime(2024, 11, 14),
+                    "expires_at": datetime.datetime(2024, 11, 17),
+                    "started_at": datetime.datetime(2024, 11, 15),
+                    "completed_at": datetime.datetime(2024, 11, 16),
+                    "cancelled_at": datetime.datetime(2024, 11, 16),
+                    "failed_at": datetime.datetime(2024, 11, 16),
+                    "incomplete_details": "max_completion_tokens",
+                    "usage": "in_progress",
+                    "temperature": 1.0,
+                    "top_p": 1.0,
+                    "max_completion_tokens": 1000,
+                    "truncation_strategy": "test",
+                    "tool_choice": "tool name",
+                    "response_format": "json",
+                    "metadata": {"foo": "bar"},
+                    "tool_resources": "test",
+                    "parallel_tool_calls": True,
+                },
+                ThreadRun,
+            ),
+            (
+                {
+                    "id": "1233",
+                    "object": "thread.message",
+                    "created_at": datetime.datetime(2024, 11, 14),
+                    "thread_id": "5678",
+                    "status": "incomplete",
+                    "incomplete_details": "test",
+                    "completed_at": datetime.datetime(2024, 11, 16),
+                    "incomplete_at": datetime.datetime(2024, 11, 16),
+                    "role": "assistant",
+                    "content": "Test",
+                    "assistant_id": "9911",
+                    "run_id": "11",
+                    "attachments": ["4", "8", "15", "16", "23", "42"],
+                    "metadata": {"foo": "bar"},
+                },
+                ThreadMessage,
+            ),
+        ],
+    )
+    def test_correct_thread_params(self, valid_params, model_cls):
+        """Test that an extra parameter returned by the service in an SSE response does not create issues."""
+
+        bad_params = {"foo": "bar"}
+        params = copy.deepcopy(valid_params)
+        params.update(bad_params)
+        # We should not be able to create the model with bad parameters.
+        with pytest.raises(TypeError):
+            model_cls(**params)
+        filtered_params = _filter_parameters(model_cls, params)
+        for k in valid_params:
+            assert k in filtered_params
+        for k in bad_params:
+            assert k not in filtered_params
+        # Implicitly check that we can create the object with the filtered parameters.
+        model_cls(**filtered_params)
+        # Check safe initialization.
+        assert isinstance(_safe_instantiate(model_cls, params), model_cls)
+
+    def test_safe_instantiate_non_dict(self):
+        """Test the _safe_instantiate method when the user supplies something other than a dictionary."""
+        assert _safe_instantiate(RunStep, 42) == 42
diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py
new file mode 100644
index 000000000000..d944cdf86007
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/conftest.py
@@ -0,0 +1,20 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ + +import pytest +from devtools_testutils import test_proxy, remove_batch_sanitizers + + +# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method +@pytest.fixture(scope="session", autouse=True) +def start_proxy(test_proxy): + return + + +@pytest.fixture(scope="session", autouse=True) +def add_sanitizers(test_proxy): + # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: + # - AZSDK3493: $..name + remove_batch_sanitizers(["AZSDK3493"]) diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py new file mode 100644 index 000000000000..86531e41f138 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -0,0 +1,40 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import sys +import logging +import functools +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader + +ServicePreparerChatCompletions = functools.partial( + EnvironmentVariableLoader, + "project_connection_string", + project_connection_string_connections_tests="endpoint;azure-subscription-id;azure-rg-name;ai-studio-hub-name", +) + +# Set to True to enable SDK logging +LOGGING_ENABLED = True + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + +class ConnectionsTestBase: + + def get_sync_client(self, **kwargs) -> AIProjectClient: + conn_str = kwargs.pop("project_connection_string_connections_tests") + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=conn_str, + ) + return project_client + diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py new file mode 100644 index 000000000000..de74233468b6 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -0,0 +1,27 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import sys
+import logging
+import datetime
+
+from azure.ai.projects.models import SASTokenCredential
+from azure.core.credentials import TokenCredential, AccessToken
+from azure.core.exceptions import HttpResponseError
+
+from connection_test_base import ConnectionsTestBase
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestConnections(ConnectionsTestBase):
+
+    def test_get_connection(self, **kwargs):
+        project_client = self.get_sync_client(**kwargs)
+        pass
+
+    def test_get_default_connection(self, **kwargs):
+        pass
+
+    def test_list_connections(self, **kwargs):
+        pass
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py
new file mode 100644
index 000000000000..a6dfb5843f99
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py
@@ -0,0 +1,98 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import datetime
+from azure.ai.projects.models import SASTokenCredential
+from azure.core.credentials import TokenCredential, AccessToken
+from azure.core.exceptions import HttpResponseError
+from connection_test_base import ConnectionsTestBase
+
+class FakeTokenCredential(TokenCredential):
+    def get_token(self, *scopes, **kwargs):
+        # Create a fake token with an expiration time
+        token = "fake_token"
+        expires_on = datetime.datetime.now() + datetime.timedelta(hours=1)
+        return AccessToken(token, expires_on.timestamp())
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestConnectionsUnitTests(ConnectionsTestBase):
+
+    # **********************************************************************************
+    #
+    # UNIT TESTS
+    #
+    # **********************************************************************************
+
+    def test_sas_token_credential_class_mocked(self, **kwargs):
+        import jwt
+        import datetime
+        import time
+
+        # Create a simple JWT that expires in 5 seconds
+        token_duration_sec = 5
+        secret_key = "my_secret_key"
+        sas_token_expiration: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
+            seconds=token_duration_sec
+        )
+        sas_token_expiration = sas_token_expiration.replace(microsecond=0)
+        payload = {"exp": sas_token_expiration}
+        sas_token = jwt.encode(payload, secret_key)
+
+        # You can parse the token string on https://jwt.ms/. The "exp" value there is the
+        # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC).
+        # See https://www.epochconverter.com/ to convert Unix time to readable date & time.
+ # The base64 decoded string will look something like this: + # { + # "alg": "HS256", + # "typ": "JWT" + # }.{ + # "exp": 1727208894 + # }.[Signature] + print(f"Generated JWT token: {sas_token}") + + sas_token_credential = SASTokenCredential( + sas_token=sas_token, + credential=FakeTokenCredential(), + subscription_id="fake_subscription_id", + resource_group_name="fake_resouce_group", + project_name="fake_project_name", + connection_name="fake_connection_name", + ) + assert sas_token_credential._expires_on == sas_token_expiration + + exception_caught = False + try: + for _ in range(token_duration_sec + 2): + print("Looping...") + time.sleep(1) + access_token = sas_token_credential.get_token() + except HttpResponseError as e: + exception_caught = True + print(e) + assert exception_caught + + # Unit tests for the SASTokenCredential class + def test_sas_token_credential_class_real(self, **kwargs): + + # Example of real SAS token for AOAI service. You can parse it on https://jwt.ms/. The "exp" value there is the + # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC) + token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ" + expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. 
See https://www.epochconverter.com/ to convert to date & time + expiration_datatime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc) + print(f"\n[TEST] Expected expiration date: {expiration_datatime_utc}") + + sas_token_credential = SASTokenCredential( + sas_token=token, + credential=None, + subscription_id=None, + resource_group_name=None, + project_name=None, + connection_name=None, + ) + + print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") + assert sas_token_credential._expires_on == expiration_datatime_utc + diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml new file mode 100644 index 000000000000..24bed3aa2376 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -0,0 +1,4 @@ +directory: specification/ai/Azure.AI.Projects +commit: 6c50f709580b8ae61f69f6a7ccc243b5e01279d1 +repo: Azure/azure-rest-api-specs +additionalDirectories: From 0f9dc5ce9dfd9990cd49d0a22bba7254bc189a38 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 24 Oct 2024 06:28:23 -0700 Subject: [PATCH 051/138] Add Inference tests (#38069) --- sdk/ai/azure-ai-projects/tests/README.md | 9 ++- .../tests/connections/connection_test_base.py | 20 ++++--- .../tests/connections/test_connections.py | 28 +++++----- .../tests/inference/inference_test_base.py | 56 +++++++++++++++++++ .../tests/inference/test_inference.py | 52 +++++++++++++++++ .../tests/inference/test_inference_async.py | 53 ++++++++++++++++++ 6 files changed, 195 insertions(+), 23 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py create mode 100644 sdk/ai/azure-ai-projects/tests/inference/test_inference.py create mode 100644 sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md index 3fde15d5623b..fe61558eb116 100644 --- a/sdk/ai/azure-ai-projects/tests/README.md +++ b/sdk/ai/azure-ai-projects/tests/README.md @@ -22,6 +22,12 @@ The instructions below are for running tests locally, on a Windows machine, agai pip install dist\azure_ai_project-1.0.0b1-py3-none-any.whl --user --force-reinstall ``` +## Log in to Azure + +```bash +az login +``` + ## Setup for running tests in the `agents` folder ```bash @@ -39,7 +45,8 @@ TODO Copy the `Project connection string` from the Azure AI Studio and set the following environment variable: ```bash -set PROJECT_CONNECTION_STRING_CONNECTIONS_TESTS= +set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING= +set AZURE_AI_PROJECTS_CONNECTIONS_TEST_MODEL_DEPLOYMENT_NAME= ``` ## Configure test proxy diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index 86531e41f138..b9168414a6ae 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -9,14 +9,19 @@ from azure.identity import DefaultAzureCredential from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader -ServicePreparerChatCompletions = functools.partial( +""" +Set these environment variables before running the test: +set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING= +""" +servicePreparerConnectionsTests = functools.partial( EnvironmentVariableLoader, - "project_connection_string", - 
project_connection_string_connections_tests="endpoint;azure-subscription-id;azure-rg-name;ai-studio-hub-name", + "azure_ai_projects_connections_test", + azure_ai_projects_connections_test_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", ) + # Set to True to enable SDK logging -LOGGING_ENABLED = True +LOGGING_ENABLED = False if LOGGING_ENABLED: # Create a logger for the 'azure' SDK @@ -28,13 +33,14 @@ handler = logging.StreamHandler(stream=sys.stdout) logger.addHandler(handler) -class ConnectionsTestBase: +class ConnectionsTestBase(AzureRecordedTestCase): def get_sync_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("project_connection_string_connections_tests") + conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string") project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), + credential=self.get_credential(AIProjectClient, is_async=False), conn_str=conn_str, + logging_enable=LOGGING_ENABLED ) return project_client diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index de74233468b6..09b99d2f9139 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -2,26 +2,24 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -import sys -import logging -import datetime - -from azure.ai.projects.models import SASTokenCredential -from azure.core.credentials import TokenCredential, AccessToken -from azure.core.exceptions import HttpResponseError - -from connection_test_base import ConnectionsTestBase +from devtools_testutils import recorded_by_proxy +from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests # The test class name needs to start with "Test" to get collected by pytest class TestConnections(ConnectionsTestBase): - def test_get_connection(self, **kwargs): + @servicePreparerConnectionsTests() + @recorded_by_proxy + def test_connections_get(self, **kwargs): project_client = self.get_sync_client(**kwargs) - pass - def test_get_default_connection(self, **kwargs): - pass + @servicePreparerConnectionsTests() + @recorded_by_proxy + def test_connections_get_default(self, **kwargs): + project_client = self.get_sync_client(**kwargs) - def test_list_connections(self, **kwargs): - pass \ No newline at end of file + @servicePreparerConnectionsTests() + @recorded_by_proxy + def test_connections_list(self, **kwargs): + project_client = self.get_sync_client(**kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py new file mode 100644 index 000000000000..efaf63eddfc2 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -0,0 +1,56 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import sys
+import logging
+import functools
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.aio import AIProjectClient as AIProjectClientAsync
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader
+
+"""
+Set these environment variables before running the test:
+set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING=
+set AZURE_AI_PROJECTS_CONNECTIONS_TEST_MODEL_DEPLOYMENT_NAME=
+"""
+servicePreparerInferenceTests = functools.partial(
+    EnvironmentVariableLoader,
+    "azure_ai_projects_connections_test",
+    azure_ai_projects_connections_test_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name",
+    azure_ai_projects_connections_test_model_deployment_name="model-deployment-name",
+)
+
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = False
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+class InferenceTestBase(AzureRecordedTestCase):
+
+    def get_sync_client(self, **kwargs) -> AIProjectClient:
+        conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string")
+        project_client = AIProjectClient.from_connection_string(
+            credential=self.get_credential(AIProjectClient, is_async=False),
+            conn_str=conn_str,
+            logging_enable=LOGGING_ENABLED
+        )
+        return project_client
+
+    def get_async_client(self, **kwargs) -> AIProjectClientAsync:
+        conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string")
+        project_client = AIProjectClientAsync.from_connection_string(
+            credential=self.get_credential(AIProjectClientAsync, is_async=True),
+            conn_str=conn_str,
+            logging_enable=LOGGING_ENABLED
+        )
+        return project_client
diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py
new file mode 100644
index 000000000000..5afdfe938e7e
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py
@@ -0,0 +1,52 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------ +import pprint +from devtools_testutils import recorded_by_proxy +from inference_test_base import InferenceTestBase, servicePreparerInferenceTests +from azure.ai.inference.models import SystemMessage, UserMessage + +# The test class name needs to start with "Test" to get collected by pytest +class TestInference(InferenceTestBase): + + @servicePreparerInferenceTests() + @recorded_by_proxy + def test_inference_get_azure_openai_client(self, **kwargs): + model = kwargs.pop("azure_ai_projects_connections_test_model_deployment_name") + with self.get_sync_client(**kwargs) as project_client: + with project_client.inference.get_azure_openai_client() as azure_openai_client: + response = azure_openai_client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + } + ], + model=model, + ) + pprint.pprint(response) + contains=["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + @servicePreparerInferenceTests() + @recorded_by_proxy + def test_inference_get_chat_completions_client(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + with project_client.inference.get_chat_completions_client() as azure_ai_inference_client: + response = azure_ai_inference_client.complete( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) + pprint.pprint(response) + contains=["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + @servicePreparerInferenceTests() + @recorded_by_proxy + def test_inference_get_embeddings_client(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + # TODO: Add test code here + pass diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py new file mode 100644 index 000000000000..d3ebbb792221 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -0,0 +1,53 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import pprint +from devtools_testutils.aio import recorded_by_proxy_async +from inference_test_base import InferenceTestBase, servicePreparerInferenceTests +from azure.ai.inference.models import SystemMessage, UserMessage + +# The test class name needs to start with "Test" to get collected by pytest +class TestInferenceAsync(InferenceTestBase): + + @servicePreparerInferenceTests() + @recorded_by_proxy_async + async def test_inference_get_azure_openai_client_async(self, **kwargs): + model = kwargs.pop("azure_ai_projects_connections_test_model_deployment_name") + async with self.get_async_client(**kwargs) as project_client: + async with await project_client.inference.get_azure_openai_client() as azure_openai_client: + response = await azure_openai_client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + } + ], + model=model, + ) + pprint.pprint(response) + contains=["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + + @servicePreparerInferenceTests() + @recorded_by_proxy_async + async def test_inference_get_chat_completions_client_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + async with await project_client.inference.get_chat_completions_client() as azure_ai_inference_client: + response = await azure_ai_inference_client.complete( + messages=[ + SystemMessage(content="You are a helpful assistant."), + UserMessage(content="How many feet are in a mile?"), + ] + ) + pprint.pprint(response) + contains=["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + + @servicePreparerInferenceTests() + @recorded_by_proxy_async + async def test_inference_get_embeddings_client_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + # TODO: Add test code here + pass From 01eafd62c2eb9eb318d7d4cecac05baef36cf484 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:08:50 -0500 Subject: [PATCH 052/138] re-doing the agent toolu updates after package changes (#38078) Co-authored-by: Marko Hietala --- .../azure/ai/projects/models/_patch.py | 30 +++++++++++++++---- ...ts_vector_store_batch_file_search_async.py | 15 ++++++++++ ...e_agents_vector_store_batch_file_search.py | 15 ++++++++++ .../samples/agents/user_functions.py | 9 +++--- 4 files changed, 59 insertions(+), 10 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 9925347721e1..1c85ce5b7385 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -36,7 +36,7 @@ ) from abc import ABC, abstractmethod -from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin +from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin logger = logging.getLogger(__name__) @@ -274,14 +274,18 @@ class FunctionTool(Tool): A tool that executes user-defined functions. """ - def __init__(self, functions: Dict[str, Any]): + def __init__(self, functions: Set[Callable[..., Any]]): """ - Initialize FunctionTool with a dictionary of functions. + Initialize FunctionTool with a set of functions. 
- :param functions: A dictionary where keys are function names and values are the function objects. + :param functions: A set of function objects. """ - self._functions = functions - self._definitions = self._build_function_definitions(functions) + self._functions = self._create_function_dict(functions) + self._definitions = self._build_function_definitions(self._functions) + + def _create_function_dict(self, funcs: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: + func_dict = {func.__name__: func for func in funcs} + return func_dict def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: specs = [] @@ -385,6 +389,12 @@ def add_vector_store(self, store_id: str): """ self.vector_store_ids.append(store_id) + def remove_vector_store(self, store_id: str): + """ + Remove a vector store ID from the list of vector stores to search for files. + """ + self.vector_store_ids.remove(store_id) + @property def definitions(self) -> List[ToolDefinition]: """ @@ -419,6 +429,14 @@ def add_file(self, file_id: str): """ self.file_ids.append(file_id) + def remove_file(self, file_id: str): + """ + Remove a file ID from the list of files to interpret. + + :param file_id: The ID of the file to remove. + """ + self.file_ids.remove(file_id) + @property def definitions(self) -> List[ToolDefinition]: """ diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index eb04c0e98673..3b001d0c920d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -77,6 +77,21 @@ async def main(): run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + + await project_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources) + print(f"Updated agent, agent ID: {agent.id}") + + thread = await project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await project_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?") + print(f"Created message, message ID: {message.id}") + + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + await project_client.agents.delete_file(file.id) print("Deleted file") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index c26ce41cbebe..c5c0f5ec2072 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -75,6 +75,21 @@ run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") + file_search_tool.remove_vector_store(vector_store.id) + print(f"Removed vector store from file 
search, vector store ID: {vector_store.id}")
+
+    project_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources)
+    print(f"Updated agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, run ID: {run.id}")
+
     project_client.agents.delete_file(file.id)
     print("Deleted file")

diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py
index 8072b1b8a944..25f7be372ef7 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py
@@ -5,6 +5,7 @@

 import json
 import datetime
+from typing import Any, Callable, Set

 # These are the user-defined functions that can be called by the agent.

@@ -58,8 +59,8 @@ def send_email(recipient: str, subject: str, body: str) -> str:

 # Statically defined user functions for fast reference
-user_functions = {
-    "fetch_current_datetime": fetch_current_datetime,
-    "fetch_weather": fetch_weather,
-    "send_email": send_email,
+user_functions: Set[Callable[..., Any]] = {
+    fetch_current_datetime,
+    fetch_weather,
+    send_email,
 }

From 216258cfecbec6f92ae59d5bb1fa46a43cb5c5c8 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Fri, 25 Oct 2024 18:47:37 -0700
Subject: [PATCH 053/138] Add .diagnostics.enable() method to fetch App Insights connection strings (#38109)

---
 sdk/ai/azure-ai-client/CHANGELOG.md | 5 -
 sdk/ai/azure-ai-client/LICENSE | 21 -
 sdk/ai/azure-ai-client/MANIFEST.in | 7 -
 sdk/ai/azure-ai-client/README.md | 80 -
 sdk/ai/azure-ai-client/assets.json | 6 -
 sdk/ai/azure-ai-client/azure/__init__.py | 1 -
 sdk/ai/azure-ai-client/azure/ai/__init__.py | 1 -
 .../azure/ai/client/__init__.py | 26 -
 .../azure/ai/client/_client.py | 137 -
 .../azure/ai/client/_configuration.py | 91 -
 .../azure/ai/client/_model_base.py | 1159 ---
 .../azure-ai-client/azure/ai/client/_patch.py | 246 -
 .../azure/ai/client/_serialization.py | 2114 -----
 .../azure-ai-client/azure/ai/client/_types.py | 18 -
 .../azure/ai/client/_vendor.py | 50 -
 .../azure/ai/client/_version.py | 9 -
 .../azure/ai/client/aio/__init__.py | 23 -
 .../azure/ai/client/aio/_client.py | 139 -
 .../azure/ai/client/aio/_configuration.py | 91 -
 .../azure/ai/client/aio/_patch.py | 200 -
 .../ai/client/aio/operations/__init__.py | 23 -
 .../ai/client/aio/operations/_operations.py | 6045 --------------
 .../azure/ai/client/aio/operations/_patch.py | 1977 -----
 .../azure/ai/client/models/__init__.py | 365 -
 .../azure/ai/client/models/_enums.py | 513 --
 .../azure/ai/client/models/_models.py | 6104 --------------
 .../azure/ai/client/models/_patch.py | 1015 ---
 .../azure/ai/client/operations/__init__.py | 23 -
 .../azure/ai/client/operations/_operations.py | 7392 ----------------
 .../azure/ai/client/operations/_patch.py | 1982 -----
 .../azure-ai-client/azure/ai/client/py.typed | 1 -
 sdk/ai/azure-ai-client/dev_requirements.txt | 4 -
 sdk/ai/azure-ai-client/requirements.txt | 0
 .../sample_agents_basics_async.py | 76 -
 .../sample_agents_functions_async.py | 117 -
...sample_agents_stream_eventhandler_async.py | 96 - ..._stream_eventhandler_with_toolset_async.py | 111 - .../sample_agents_stream_iteration_async.py | 92 - ...ts_vector_store_batch_file_search_async.py | 109 - ...gents_with_file_search_attachment_async.py | 83 - .../async_samples/user_async_functions.py | 29 - .../samples/agents/product_info_1.md | 51 - .../samples/agents/sample_agents_basics.py | 63 - ...mple_agents_code_interpreter_attachment.py | 80 - .../agents/sample_agents_file_search.py | 87 - .../samples/agents/sample_agents_functions.py | 105 - .../agents/sample_agents_run_with_toolset.py | 80 - .../sample_agents_stream_eventhandler.py | 98 - ...ents_stream_eventhandler_with_functions.py | 132 - ...agents_stream_eventhandler_with_toolset.py | 109 - .../agents/sample_agents_stream_iteration.py | 92 - ...le_agents_stream_iteration_with_toolset.py | 122 - ...e_agents_vector_store_batch_file_search.py | 104 - ...mple_agents_with_file_search_attachment.py | 75 - .../samples/agents/user_functions.py | 66 - .../async_samples/sample_connections_async.py | 139 - .../samples/connections/sample_connections.py | 120 - .../evaluations/evaluate_test_data.jsonl | 3 - .../samples/evaluations/sample_evaluations.py | 88 - .../sample_evaluations_schedules.py | 69 - .../sample_get_azure_openai_client_async.py | 57 - ...ample_get_chat_completions_client_async.py | 49 - .../sample_get_embeddings_client_async.py | 54 - .../sample_get_azure_openai_client.py | 45 - .../sample_get_chat_completions_client.py | 38 - .../inference/sample_get_embeddings_client.py | 42 - sdk/ai/azure-ai-client/setup.py | 71 - sdk/ai/azure-ai-client/tests/README.md | 79 - .../tests/agents/test_agents_client.py | 1119 --- .../tests/agents/test_deserialization.py | 92 - sdk/ai/azure-ai-client/tests/conftest.py | 20 - .../tests/endpoints/unit_tests.py | 114 - sdk/ai/azure-ai-client/tsp-location.yaml | 4 - sdk/ai/azure-ai-project/CHANGELOG.md | 5 - sdk/ai/azure-ai-project/LICENSE | 21 - sdk/ai/azure-ai-project/MANIFEST.in | 7 - sdk/ai/azure-ai-project/README.md | 80 - sdk/ai/azure-ai-project/azure/__init__.py | 1 - sdk/ai/azure-ai-project/azure/ai/__init__.py | 1 - .../azure/ai/project/__init__.py | 32 - .../azure/ai/project/_client.py | 137 - .../azure/ai/project/_configuration.py | 91 - .../azure/ai/project/_model_base.py | 1159 --- .../azure/ai/project/_patch.py | 246 - .../azure/ai/project/_serialization.py | 2114 ----- .../azure/ai/project/_types.py | 18 - .../azure/ai/project/_vendor.py | 50 - .../azure/ai/project/_version.py | 9 - .../azure/ai/project/aio/__init__.py | 29 - .../azure/ai/project/aio/_client.py | 139 - .../azure/ai/project/aio/_configuration.py | 91 - .../azure/ai/project/aio/_patch.py | 200 - .../ai/project/aio/operations/__init__.py | 29 - .../ai/project/aio/operations/_operations.py | 6049 -------------- .../azure/ai/project/aio/operations/_patch.py | 1977 ----- .../azure/ai/project/models/__init__.py | 376 - .../azure/ai/project/models/_enums.py | 513 -- .../azure/ai/project/models/_models.py | 6105 -------------- .../azure/ai/project/models/_patch.py | 997 --- .../azure/ai/project/operations/__init__.py | 29 - .../ai/project/operations/_operations.py | 7396 ----------------- .../azure/ai/project/operations/_patch.py | 1982 ----- .../azure/ai/project/py.typed | 1 - sdk/ai/azure-ai-project/dev_requirements.txt | 4 - .../sample_agents_basics_async.py | 76 - .../sample_agents_functions_async.py | 117 - ...sample_agents_stream_eventhandler_async.py | 96 - ..._stream_eventhandler_with_toolset_async.py | 111 - 
.../sample_agents_stream_iteration_async.py | 92 - ...ts_vector_store_batch_file_search_async.py | 94 - ...gents_with_file_search_attachment_async.py | 83 - .../async_samples/user_async_functions.py | 29 - .../samples/agents/product_info_1.md | 51 - .../samples/agents/sample_agents_basics.py | 63 - ...mple_agents_code_interpreter_attachment.py | 80 - .../agents/sample_agents_file_search.py | 87 - .../samples/agents/sample_agents_functions.py | 105 - .../agents/sample_agents_run_with_toolset.py | 80 - .../sample_agents_stream_eventhandler.py | 98 - ...ents_stream_eventhandler_with_functions.py | 132 - ...agents_stream_eventhandler_with_toolset.py | 109 - .../agents/sample_agents_stream_iteration.py | 92 - ...le_agents_stream_iteration_with_toolset.py | 122 - ...e_agents_vector_store_batch_file_search.py | 88 - ...mple_agents_with_file_search_attachment.py | 75 - .../samples/agents/user_functions.py | 65 - .../async_samples/sample_connections_async.py | 139 - .../samples/connections/sample_connections.py | 120 - .../evaluations/evaluate_test_data.jsonl | 3 - .../samples/evaluations/sample_evaluations.py | 88 - .../sample_evaluations_schedules.py | 75 - .../sample_get_azure_openai_client_async.py | 57 - ...ample_get_chat_completions_client_async.py | 49 - .../sample_get_embeddings_client_async.py | 54 - .../sample_get_azure_openai_client.py | 45 - .../inference/sample_get_embeddings_client.py | 42 - sdk/ai/azure-ai-project/setup.py | 71 - sdk/ai/azure-ai-project/tests/README.md | 72 - .../tests/agents/test_agents_client.py | 1119 --- .../tests/agents/test_deserialization.py | 92 - sdk/ai/azure-ai-project/tests/conftest.py | 20 - .../tests/connections/connection_test_base.py | 40 - .../tests/connections/test_connections.py | 27 - .../test_connections_unit_tests.py | 98 - sdk/ai/azure-ai-project/tsp-location.yaml | 4 - .../azure/ai/projects/_client.py | 5 +- .../azure/ai/projects/_patch.py | 49 +- .../azure/ai/projects/aio/_client.py | 5 +- .../azure/ai/projects/aio/_patch.py | 43 +- .../ai/projects/aio/operations/__init__.py | 2 + .../ai/projects/aio/operations/_operations.py | 213 +- .../ai/projects/aio/operations/_patch.py | 84 +- .../azure/ai/projects/models/__init__.py | 4 + .../azure/ai/projects/models/_models.py | 164 +- .../azure/ai/projects/models/_patch.py | 5 +- .../azure/ai/projects/operations/__init__.py | 2 + .../ai/projects/operations/_operations.py | 258 +- .../azure/ai/projects/operations/_patch.py | 84 +- .../azure_ai_projects_tests.env | 19 + ...sample_agents_stream_eventhandler_async.py | 8 +- ...ts_vector_store_batch_file_search_async.py | 14 +- ...gents_with_file_search_attachment_async.py | 4 +- .../agents/sample_agents_file_search.py | 4 +- ...agents_stream_eventhandler_with_toolset.py | 9 +- ...e_agents_vector_store_batch_file_search.py | 10 +- .../samples/evaluations/sample_evaluations.py | 13 +- .../sample_evaluations_schedules.py | 46 +- .../sample_get_azure_openai_client.py | 2 +- ...t_chat_completions_client_with_tracing.py} | 16 +- sdk/ai/azure-ai-projects/tests/README.md | 15 - .../tests/agents/test_deserialization.py | 1 + sdk/ai/azure-ai-projects/tests/conftest.py | 5 +- .../tests/connections/connection_test_base.py | 14 +- .../tests/connections/test_connections.py | 1 + .../test_connections_unit_tests.py | 4 +- .../diagnostics/diagnostics_test_base.py | 60 + .../tests/diagnostics/test_diagnostics.py | 21 + .../diagnostics/test_diagnostics_async.py | 21 + .../tests/inference/inference_test_base.py | 19 +- .../tests/inference/test_inference.py | 10 +- 
.../tests/inference/test_inference_async.py | 13 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 182 files changed, 1027 insertions(+), 68318 deletions(-) delete mode 100644 sdk/ai/azure-ai-client/CHANGELOG.md delete mode 100644 sdk/ai/azure-ai-client/LICENSE delete mode 100644 sdk/ai/azure-ai-client/MANIFEST.in delete mode 100644 sdk/ai/azure-ai-client/README.md delete mode 100644 sdk/ai/azure-ai-client/assets.json delete mode 100644 sdk/ai/azure-ai-client/azure/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_client.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_configuration.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_model_base.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_patch.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_serialization.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_types.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_vendor.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/_version.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/_models.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py delete mode 100644 sdk/ai/azure-ai-client/azure/ai/client/py.typed delete mode 100644 sdk/ai/azure-ai-client/dev_requirements.txt delete mode 100644 sdk/ai/azure-ai-client/requirements.txt delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/product_info_1.md delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py delete mode 100644 
sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py delete mode 100644 sdk/ai/azure-ai-client/samples/agents/user_functions.py delete mode 100644 sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/connections/sample_connections.py delete mode 100644 sdk/ai/azure-ai-client/samples/evaluations/evaluate_test_data.jsonl delete mode 100644 sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py delete mode 100644 sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py delete mode 100644 sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py delete mode 100644 sdk/ai/azure-ai-client/setup.py delete mode 100644 sdk/ai/azure-ai-client/tests/README.md delete mode 100644 sdk/ai/azure-ai-client/tests/agents/test_agents_client.py delete mode 100644 sdk/ai/azure-ai-client/tests/agents/test_deserialization.py delete mode 100644 sdk/ai/azure-ai-client/tests/conftest.py delete mode 100644 sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py delete mode 100644 sdk/ai/azure-ai-client/tsp-location.yaml delete mode 100644 sdk/ai/azure-ai-project/CHANGELOG.md delete mode 100644 sdk/ai/azure-ai-project/LICENSE delete mode 100644 sdk/ai/azure-ai-project/MANIFEST.in delete mode 100644 sdk/ai/azure-ai-project/README.md delete mode 100644 sdk/ai/azure-ai-project/azure/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_client.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_configuration.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_model_base.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_patch.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_serialization.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_types.py delete mode 100644 
sdk/ai/azure-ai-project/azure/ai/project/_vendor.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/_version.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_models.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py delete mode 100644 sdk/ai/azure-ai-project/azure/ai/project/py.typed delete mode 100644 sdk/ai/azure-ai-project/dev_requirements.txt delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/product_info_1.md delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py delete mode 100644 sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py delete mode 
100644 sdk/ai/azure-ai-project/samples/agents/user_functions.py delete mode 100644 sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/connections/sample_connections.py delete mode 100644 sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl delete mode 100644 sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py delete mode 100644 sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py delete mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py delete mode 100644 sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py delete mode 100644 sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py delete mode 100644 sdk/ai/azure-ai-project/setup.py delete mode 100644 sdk/ai/azure-ai-project/tests/README.md delete mode 100644 sdk/ai/azure-ai-project/tests/agents/test_agents_client.py delete mode 100644 sdk/ai/azure-ai-project/tests/agents/test_deserialization.py delete mode 100644 sdk/ai/azure-ai-project/tests/conftest.py delete mode 100644 sdk/ai/azure-ai-project/tests/connections/connection_test_base.py delete mode 100644 sdk/ai/azure-ai-project/tests/connections/test_connections.py delete mode 100644 sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py delete mode 100644 sdk/ai/azure-ai-project/tsp-location.yaml create mode 100644 sdk/ai/azure-ai-projects/azure_ai_projects_tests.env rename sdk/ai/{azure-ai-project/samples/inference/sample_get_chat_completions_client.py => azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py} (67%) create mode 100644 sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py create mode 100644 sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py create mode 100644 sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py diff --git a/sdk/ai/azure-ai-client/CHANGELOG.md b/sdk/ai/azure-ai-client/CHANGELOG.md deleted file mode 100644 index 628743d283a9..000000000000 --- a/sdk/ai/azure-ai-client/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# Release History - -## 1.0.0b1 (1970-01-01) - -- Initial version diff --git a/sdk/ai/azure-ai-client/LICENSE b/sdk/ai/azure-ai-client/LICENSE deleted file mode 100644 index 63447fd8bbbf..000000000000 --- a/sdk/ai/azure-ai-client/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) Microsoft Corporation. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/MANIFEST.in b/sdk/ai/azure-ai-client/MANIFEST.in deleted file mode 100644 index cfc4bcbd9797..000000000000 --- a/sdk/ai/azure-ai-client/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -include *.md -include LICENSE -include azure/ai/client/py.typed -recursive-include tests *.py -recursive-include samples *.py *.md -include azure/__init__.py -include azure/ai/__init__.py \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/README.md b/sdk/ai/azure-ai-client/README.md deleted file mode 100644 index d6bc4ea4c008..000000000000 --- a/sdk/ai/azure-ai-client/README.md +++ /dev/null @@ -1,80 +0,0 @@ - - -# Azure Ai Client client library for Python - - -## Getting started - -### Install the package - -```bash -python -m pip install azure-ai-client -``` - -#### Prequisites - -- Python 3.8 or later is required to use this package. -- You need an [Azure subscription][azure_sub] to use this package. -- An existing Azure Ai Client instance. -#### Create with an Azure Active Directory Credential -To use an [Azure Active Directory (AAD) token credential][authenticate_with_token], -provide an instance of the desired credential type obtained from the -[azure-identity][azure_identity_credentials] library. - -To authenticate with AAD, you must first [pip][pip] install [`azure-identity`][azure_identity_pip] - -After setup, you can choose which type of [credential][azure_identity_credentials] from azure.identity to use. -As an example, [DefaultAzureCredential][default_azure_credential] can be used to authenticate the client: - -Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables: -`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET` - -Use the returned token credential to authenticate the client: - -```python ->>> from azure.ai.client import Client ->>> from azure.identity import DefaultAzureCredential ->>> client = Client(endpoint='', credential=DefaultAzureCredential()) -``` - -## Examples - -```python ->>> from azure.ai.client import Client ->>> from azure.identity import DefaultAzureCredential ->>> from azure.core.exceptions import HttpResponseError - ->>> client = Client(endpoint='', credential=DefaultAzureCredential()) ->>> try: - - except HttpResponseError as e: - print('service responds error: {}'.format(e.response.json())) - -``` - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. - -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. 
- - -[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token -[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials -[azure_identity_pip]: https://pypi.org/project/azure-identity/ -[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential -[pip]: https://pypi.org/project/pip/ -[azure_sub]: https://azure.microsoft.com/free/ - diff --git a/sdk/ai/azure-ai-client/assets.json b/sdk/ai/azure-ai-client/assets.json deleted file mode 100644 index a2aabb4cbaf7..000000000000 --- a/sdk/ai/azure-ai-client/assets.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "AssetsRepo": "Azure/azure-sdk-assets", - "AssetsRepoPrefixPath": "python", - "TagPrefix": "python/ai/azure-ai-client", - "Tag": "python/ai/azure-ai-client_246b906947" -} diff --git a/sdk/ai/azure-ai-client/azure/__init__.py b/sdk/ai/azure-ai-client/azure/__init__.py deleted file mode 100644 index d55ccad1f573..000000000000 --- a/sdk/ai/azure-ai-client/azure/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/__init__.py b/sdk/ai/azure-ai-client/azure/ai/__init__.py deleted file mode 100644 index d55ccad1f573..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/__init__.py deleted file mode 100644 index 809ca737e8e4..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._client import AzureAIClient -from ._version import VERSION - -__version__ = VERSION - -try: - from ._patch import __all__ as _patch_all - from ._patch import * # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AzureAIClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore - -_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/_client.py deleted file mode 100644 index cc8c0f6da8b1..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_client.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
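For reference, the client deleted here was constructed from the four identifying values plus a token credential described in the docstring that follows. A minimal sketch, with placeholder values rather than a live endpoint:

```python
from azure.ai.client import AzureAIClient
from azure.identity import DefaultAzureCredential

# Endpoint form per the docstring below: https://<region>.api.azureml.ms
client = AzureAIClient(
    endpoint="https://westus.api.azureml.ms",
    subscription_id="<subscription-guid>",
    resource_group_name="<resource-group-name>",
    project_name="<project-name>",
    credential=DefaultAzureCredential(),
)
```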
-# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, TYPE_CHECKING -from typing_extensions import Self - -from azure.core import PipelineClient -from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse - -from ._configuration import AzureAIClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - - -class AzureAIClient: - """AzureAIClient. - - :ivar agents: AgentsOperations operations - :vartype agents: azure.ai.client.operations.AgentsOperations - :ivar connections: ConnectionsOperations operations - :vartype connections: azure.ai.client.operations.ConnectionsOperations - :ivar evaluations: EvaluationsOperations operations - :vartype evaluations: azure.ai.client.operations.EvaluationsOperations - :param endpoint: The Azure AI Studio project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``\\\\ , where - :code:`` is the Azure region where the project is deployed (e.g. westus) and - :code:`` is the GUID of the Enterprise private link. Required. - :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Studio project name. Required. - :type project_name: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-07-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long - self._config = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) - - def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> Self: - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py deleted file mode 100644 index dde212466722..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_configuration.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline import policies - -from ._version import VERSION - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - - -class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for AzureAIClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: The Azure AI Studio project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``\\ , where :code:`` - is the Azure region where the project is deployed (e.g. westus) and :code:`` - is the GUID of the Enterprise private link. Required. - :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Studio project name. Required. - :type project_name: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-07-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any - ) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "ai-client/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.BearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py b/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py deleted file mode 100644 index 9d401b0cf012..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_model_base.py +++ /dev/null @@ -1,1159 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
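The helpers at the top of this deleted module convert Python temporal types to their wire formats. `_timedelta_as_isostr`, defined just below, emits ISO 8601 durations, and the expected outputs here follow directly from its implementation:

```python
from datetime import timedelta

# Mixed date and time components produce both segments of the duration.
assert _timedelta_as_isostr(timedelta(days=4, hours=12, minutes=30, seconds=5)) == "P4DT12H30M05S"

# A pure time value drops the date segment entirely.
assert _timedelta_as_isostr(timedelta(seconds=30)) == "PT30S"
```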
-# -------------------------------------------------------------------------- -# pylint: disable=protected-access, arguments-differ, signature-differs, broad-except, too-many-lines - -import copy -import calendar -import decimal -import functools -import sys -import logging -import base64 -import re -import typing -import enum -import email.utils -from datetime import datetime, date, time, timedelta, timezone -from json import JSONEncoder -import xml.etree.ElementTree as ET -from typing_extensions import Self -import isodate -from azure.core.exceptions import DeserializationError -from azure.core import CaseInsensitiveEnumMeta -from azure.core.pipeline import PipelineResponse -from azure.core.serialization import _Null - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping - -_LOGGER = logging.getLogger(__name__) - -__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] - -TZ_UTC = timezone.utc -_T = typing.TypeVar("_T") - - -def _timedelta_as_isostr(td: timedelta) -> str: - """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' - - Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython - - :param timedelta td: The timedelta to convert - :rtype: str - :return: ISO8601 version of this timedelta - """ - - # Split seconds to larger units - seconds = td.total_seconds() - minutes, seconds = divmod(seconds, 60) - hours, minutes = divmod(minutes, 60) - days, hours = divmod(hours, 24) - - days, hours, minutes = list(map(int, (days, hours, minutes))) - seconds = round(seconds, 6) - - # Build date - date_str = "" - if days: - date_str = "%sD" % days - - if hours or minutes or seconds: - # Build time - time_str = "T" - - # Hours - bigger_exists = date_str or hours - if bigger_exists: - time_str += "{:02}H".format(hours) - - # Minutes - bigger_exists = bigger_exists or minutes - if bigger_exists: - time_str += "{:02}M".format(minutes) - - # Seconds - try: - if seconds.is_integer(): - seconds_string = "{:02}".format(int(seconds)) - else: - # 9 chars long w/ leading 0, 6 digits after decimal - seconds_string = "%09.6f" % seconds - # Remove trailing zeros - seconds_string = seconds_string.rstrip("0") - except AttributeError: # int.is_integer() raises - seconds_string = "{:02}".format(seconds) - - time_str += "{}S".format(seconds_string) - else: - time_str = "" - - return "P" + date_str + time_str - - -def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: - encoded = base64.b64encode(o).decode() - if format == "base64url": - return encoded.strip("=").replace("+", "-").replace("/", "_") - return encoded - - -def _serialize_datetime(o, format: typing.Optional[str] = None): - if hasattr(o, "year") and hasattr(o, "hour"): - if format == "rfc7231": - return email.utils.format_datetime(o, usegmt=True) - if format == "unix-timestamp": - return int(calendar.timegm(o.utctimetuple())) - - # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) - if not o.tzinfo: - iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() - else: - iso_formatted = o.astimezone(TZ_UTC).isoformat() - # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) - return iso_formatted.replace("+00:00", "Z") - # Next try datetime.date or datetime.time - return o.isoformat() - - -def _is_readonly(p): - try: - return p._visibility == ["read"] - except AttributeError: - return False - 
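`SdkJSONEncoder`, defined next, routes those serializers through `json.dumps`, so payload values the stdlib encoder rejects (datetimes, bytes, `Decimal`) become their wire representations:

```python
import json
from datetime import datetime, timezone

body = {"when": datetime(2024, 1, 1, tzinfo=timezone.utc), "blob": b"hi"}

# Datetimes become RFC 3339 strings with a trailing "Z"; bytes are base64-encoded.
print(json.dumps(body, cls=SdkJSONEncoder))
# {"when": "2024-01-01T00:00:00Z", "blob": "aGk="}
```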
- -class SdkJSONEncoder(JSONEncoder): - """A JSON encoder that's capable of serializing datetime objects and bytes.""" - - def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): - super().__init__(*args, **kwargs) - self.exclude_readonly = exclude_readonly - self.format = format - - def default(self, o): # pylint: disable=too-many-return-statements - if _is_model(o): - if self.exclude_readonly: - readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] - return {k: v for k, v in o.items() if k not in readonly_props} - return dict(o.items()) - try: - return super(SdkJSONEncoder, self).default(o) - except TypeError: - if isinstance(o, _Null): - return None - if isinstance(o, decimal.Decimal): - return float(o) - if isinstance(o, (bytes, bytearray)): - return _serialize_bytes(o, self.format) - try: - # First try datetime.datetime - return _serialize_datetime(o, self.format) - except AttributeError: - pass - # Last, try datetime.timedelta - try: - return _timedelta_as_isostr(o) - except AttributeError: - # This will be raised when it hits value.total_seconds in the method above - pass - return super(SdkJSONEncoder, self).default(o) - - -_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") -_VALID_RFC7231 = re.compile( - r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" - r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" -) - - -def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: - """Deserialize ISO-8601 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - attr = attr.upper() - match = _VALID_DATE.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - check_decimal = attr.split(".") - if len(check_decimal) > 1: - decimal_str = "" - for digit in check_decimal[1]: - if digit.isdigit(): - decimal_str += digit - else: - break - if len(decimal_str) > 6: - attr = attr.replace(decimal_str, decimal_str[0:6]) - - date_obj = isodate.parse_datetime(attr) - test_utc = date_obj.utctimetuple() - if test_utc.tm_year > 9999 or test_utc.tm_year < 1: - raise OverflowError("Hit max or min date") - return date_obj - - -def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: - """Deserialize RFC7231 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - match = _VALID_RFC7231.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - return email.utils.parsedate_to_datetime(attr) - - -def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: - """Deserialize unix timestamp into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - return datetime.fromtimestamp(attr, TZ_UTC) - - -def _deserialize_date(attr: typing.Union[str, date]) -> date: - """Deserialize ISO-8601 formatted string into Date object. 
- :param str attr: response string to be deserialized. - :rtype: date - :returns: The date object from that input - """ - # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. - if isinstance(attr, date): - return attr - return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore - - -def _deserialize_time(attr: typing.Union[str, time]) -> time: - """Deserialize ISO-8601 formatted string into time object. - - :param str attr: response string to be deserialized. - :rtype: datetime.time - :returns: The time object from that input - """ - if isinstance(attr, time): - return attr - return isodate.parse_time(attr) - - -def _deserialize_bytes(attr): - if isinstance(attr, (bytes, bytearray)): - return attr - return bytes(base64.b64decode(attr)) - - -def _deserialize_bytes_base64(attr): - if isinstance(attr, (bytes, bytearray)): - return attr - padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore - attr = attr + padding # type: ignore - encoded = attr.replace("-", "+").replace("_", "/") - return bytes(base64.b64decode(encoded)) - - -def _deserialize_duration(attr): - if isinstance(attr, timedelta): - return attr - return isodate.parse_duration(attr) - - -def _deserialize_decimal(attr): - if isinstance(attr, decimal.Decimal): - return attr - return decimal.Decimal(str(attr)) - - -def _deserialize_int_as_str(attr): - if isinstance(attr, int): - return attr - return int(attr) - - -_DESERIALIZE_MAPPING = { - datetime: _deserialize_datetime, - date: _deserialize_date, - time: _deserialize_time, - bytes: _deserialize_bytes, - bytearray: _deserialize_bytes, - timedelta: _deserialize_duration, - typing.Any: lambda x: x, - decimal.Decimal: _deserialize_decimal, -} - -_DESERIALIZE_MAPPING_WITHFORMAT = { - "rfc3339": _deserialize_datetime, - "rfc7231": _deserialize_datetime_rfc7231, - "unix-timestamp": _deserialize_datetime_unix_timestamp, - "base64": _deserialize_bytes, - "base64url": _deserialize_bytes_base64, -} - - -def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): - if annotation is int and rf and rf._format == "str": - return _deserialize_int_as_str - if rf and rf._format: - return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) - return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore - - -def _get_type_alias_type(module_name: str, alias_name: str): - types = { - k: v - for k, v in sys.modules[module_name].__dict__.items() - if isinstance(v, typing._GenericAlias) # type: ignore - } - if alias_name not in types: - return alias_name - return types[alias_name] - - -def _get_model(module_name: str, model_name: str): - models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} - module_end = module_name.rsplit(".", 1)[0] - models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) - if isinstance(model_name, str): - model_name = model_name.split(".")[-1] - if model_name not in models: - return model_name - return models[model_name] - - -_UNSET = object() - - -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object - def __init__(self, data: typing.Dict[str, typing.Any]) -> None: - self._data = data - - def __contains__(self, key: typing.Any) -> bool: - return key in self._data - - def __getitem__(self, key: str) -> typing.Any: - return self._data.__getitem__(key) - - def __setitem__(self, key: str, value: typing.Any) -> None: - self._data.__setitem__(key, value) - - def __delitem__(self, 
key: str) -> None: - self._data.__delitem__(key) - - def __iter__(self) -> typing.Iterator[typing.Any]: - return self._data.__iter__() - - def __len__(self) -> int: - return self._data.__len__() - - def __ne__(self, other: typing.Any) -> bool: - return not self.__eq__(other) - - def keys(self) -> typing.KeysView[str]: - return self._data.keys() - - def values(self) -> typing.ValuesView[typing.Any]: - return self._data.values() - - def items(self) -> typing.ItemsView[str, typing.Any]: - return self._data.items() - - def get(self, key: str, default: typing.Any = None) -> typing.Any: - try: - return self[key] - except KeyError: - return default - - @typing.overload - def pop(self, key: str) -> typing.Any: ... - - @typing.overload - def pop(self, key: str, default: _T) -> _T: ... - - @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... - - def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: - if default is _UNSET: - return self._data.pop(key) - return self._data.pop(key, default) - - def popitem(self) -> typing.Tuple[str, typing.Any]: - return self._data.popitem() - - def clear(self) -> None: - self._data.clear() - - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: - self._data.update(*args, **kwargs) - - @typing.overload - def setdefault(self, key: str, default: None = None) -> None: ... - - @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... - - def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: - if default is _UNSET: - return self._data.setdefault(key) - return self._data.setdefault(key, default) - - def __eq__(self, other: typing.Any) -> bool: - try: - other_model = self.__class__(other) - except Exception: - return False - return self._data == other_model._data - - def __repr__(self) -> str: - return str(self._data) - - -def _is_model(obj: typing.Any) -> bool: - return getattr(obj, "_is_model", False) - - -def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements - if isinstance(o, list): - return [_serialize(x, format) for x in o] - if isinstance(o, dict): - return {k: _serialize(v, format) for k, v in o.items()} - if isinstance(o, set): - return {_serialize(x, format) for x in o} - if isinstance(o, tuple): - return tuple(_serialize(x, format) for x in o) - if isinstance(o, (bytes, bytearray)): - return _serialize_bytes(o, format) - if isinstance(o, decimal.Decimal): - return float(o) - if isinstance(o, enum.Enum): - return o.value - if isinstance(o, int): - if format == "str": - return str(o) - return o - try: - # First try datetime.datetime - return _serialize_datetime(o, format) - except AttributeError: - pass - # Last, try datetime.timedelta - try: - return _timedelta_as_isostr(o) - except AttributeError: - # This will be raised when it hits value.total_seconds in the method above - pass - return o - - -def _get_rest_field( - attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str -) -> typing.Optional["_RestField"]: - try: - return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) - except StopIteration: - return None - - -def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: - if not rf: - return _serialize(value, None) - if rf._is_multipart_file_input: - return value - if rf._is_model: - return _deserialize(rf._type, value) - if isinstance(value, ET.Element): - value = _deserialize(rf._type, value) - return _serialize(value, rf._format) - - 
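The `Model` base class that follows layers typed attribute access on top of the `_MyMutableMapping` shown above, so every generated model also behaves as a plain mapping. Even the untyped base class round-trips dict data through the mapping protocol:

```python
m = Model({"name": "demo"})
m["count"] = 2                             # __setitem__ from _MyMutableMapping
assert m.get("missing", 0) == 0            # dict-style default lookup
assert m == {"name": "demo", "count": 2}   # __eq__ coerces the other side through Model
```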
-class Model(_MyMutableMapping): - _is_model = True - # label whether current class's _attr_to_rest_field has been calculated - # could not see _attr_to_rest_field directly because subclass inherits it from parent class - _calculated: typing.Set[str] = set() - - def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: - class_name = self.__class__.__name__ - if len(args) > 1: - raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") - dict_to_pass = { - rest_field._rest_name: rest_field._default - for rest_field in self._attr_to_rest_field.values() - if rest_field._default is not _UNSET - } - if args: # pylint: disable=too-many-nested-blocks - if isinstance(args[0], ET.Element): - existed_attr_keys = [] - model_meta = getattr(self, "_xml", {}) - - for rf in self._attr_to_rest_field.values(): - prop_meta = getattr(rf, "_xml", {}) - xml_name = prop_meta.get("name", rf._rest_name) - xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) - if xml_ns: - xml_name = "{" + xml_ns + "}" + xml_name - - # attribute - if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: - existed_attr_keys.append(xml_name) - dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) - continue - - # unwrapped element is array - if prop_meta.get("unwrapped", False): - # an unwrapped array may use either the prop's items meta or the prop meta - if prop_meta.get("itemsName"): - xml_name = prop_meta.get("itemsName") - xml_ns = prop_meta.get("itemNs") - if xml_ns: - xml_name = "{" + xml_ns + "}" + xml_name - items = args[0].findall(xml_name) # pyright: ignore - if len(items) > 0: - existed_attr_keys.append(xml_name) - dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) - continue - - # text element is primitive type - if prop_meta.get("text", False): - if args[0].text is not None: - dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) - continue - - # a wrapped element can be a normal property or an array; it should only have one element - item = args[0].find(xml_name) - if item is not None: - existed_attr_keys.append(xml_name) - dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) - - # the remaining elements are additional properties - for e in args[0]: - if e.tag not in existed_attr_keys: - dict_to_pass[e.tag] = _convert_element(e) - else: - dict_to_pass.update( - {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} - ) - else: - non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] - if non_attr_kwargs: - # actual type errors only throw the first wrong keyword arg they see, so following that. 
- raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") - dict_to_pass.update( - { - self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) - for k, v in kwargs.items() - if v is not None - } - ) - super().__init__(dict_to_pass) - - def copy(self) -> "Model": - return Model(self.__dict__) - - def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: # pylint: disable=unused-argument - if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: - # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', - # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' - mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order - attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property - k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") - } - annotations = { - k: v - for mro_class in mros - if hasattr(mro_class, "__annotations__") # pylint: disable=no-member - for k, v in mro_class.__annotations__.items() # pylint: disable=no-member - } - for attr, rf in attr_to_rest_field.items(): - rf._module = cls.__module__ - if not rf._type: - rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) - if not rf._rest_name_input: - rf._rest_name_input = attr - cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) - cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - - return super().__new__(cls) # pylint: disable=no-value-for-parameter - - def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: - for base in cls.__bases__: - if hasattr(base, "__mapping__"): # pylint: disable=no-member - base.__mapping__[discriminator or cls.__name__] = cls # type: ignore # pylint: disable=no-member - - @classmethod - def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: - for v in cls.__dict__.values(): - if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: - return v - return None - - @classmethod - def _deserialize(cls, data, exist_discriminators): - if not hasattr(cls, "__mapping__"): # pylint: disable=no-member - return cls(data) - discriminator = cls._get_discriminator(exist_discriminators) - if discriminator is None: - return cls(data) - exist_discriminators.append(discriminator._rest_name) - if isinstance(data, ET.Element): - model_meta = getattr(cls, "_xml", {}) - prop_meta = getattr(discriminator, "_xml", {}) - xml_name = prop_meta.get("name", discriminator._rest_name) - xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) - if xml_ns: - xml_name = "{" + xml_ns + "}" + xml_name - - if data.get(xml_name) is not None: - discriminator_value = data.get(xml_name) - else: - discriminator_value = data.find(xml_name).text # pyright: ignore - else: - discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore # pylint: disable=no-member - return mapped_cls._deserialize(data, exist_discriminators) - - def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: - """Return a dict that can be turned into json using json.dump. - - :keyword bool exclude_readonly: Whether to remove the readonly properties. 
- :returns: A dict JSON compatible object - :rtype: dict - """ - - result = {} - readonly_props = [] - if exclude_readonly: - readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] - for k, v in self.items(): - if exclude_readonly and k in readonly_props: # pyright: ignore - continue - is_multipart_file_input = False - try: - is_multipart_file_input = next( - rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k - )._is_multipart_file_input - except StopIteration: - pass - result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) - return result - - @staticmethod - def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: - if v is None or isinstance(v, _Null): - return None - if isinstance(v, (list, tuple, set)): - return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) - if isinstance(v, dict): - return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} - return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v - - -def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): - if _is_model(obj): - return obj - return _deserialize(model_deserializer, obj) - - -def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): - if obj is None: - return obj - return _deserialize_with_callable(if_obj_deserializer, obj) - - -def _deserialize_with_union(deserializers, obj): - for deserializer in deserializers: - try: - return _deserialize(deserializer, obj) - except DeserializationError: - pass - raise DeserializationError() - - -def _deserialize_dict( - value_deserializer: typing.Optional[typing.Callable], - module: typing.Optional[str], - obj: typing.Dict[typing.Any, typing.Any], -): - if obj is None: - return obj - if isinstance(obj, ET.Element): - obj = {child.tag: child for child in obj} - return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} - - -def _deserialize_multiple_sequence( - entry_deserializers: typing.List[typing.Optional[typing.Callable]], - module: typing.Optional[str], - obj, -): - if obj is None: - return obj - return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) - - -def _deserialize_sequence( - deserializer: typing.Optional[typing.Callable], - module: typing.Optional[str], - obj, -): - if obj is None: - return obj - if isinstance(obj, ET.Element): - obj = list(obj) - return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) - - -def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: - return sorted( - types, - key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), - ) - - -def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches - annotation: typing.Any, - module: typing.Optional[str], - rf: typing.Optional["_RestField"] = None, -) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: - if not annotation: - return None - - # is it a type alias? - if isinstance(annotation, str): - if module is not None: - annotation = _get_type_alias_type(module, annotation) - - # is it a forward ref / in quotes? 
- if isinstance(annotation, (str, typing.ForwardRef)): - try: - model_name = annotation.__forward_arg__ # type: ignore - except AttributeError: - model_name = annotation - if module is not None: - annotation = _get_model(module, model_name) - - try: - if module and _is_model(annotation): - if rf: - rf._is_model = True - - return functools.partial(_deserialize_model, annotation) # pyright: ignore - except Exception: - pass - - # is it a literal? - try: - if annotation.__origin__ is typing.Literal: # pyright: ignore - return None - except AttributeError: - pass - - # is it optional? - try: - if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore - if len(annotation.__args__) <= 2: # pyright: ignore - if_obj_deserializer = _get_deserialize_callable_from_annotation( - next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore - ) - - return functools.partial(_deserialize_with_optional, if_obj_deserializer) - # the type is Optional[Union[...]], we need to remove the None type from the Union - annotation_copy = copy.copy(annotation) - annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore - return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) - except AttributeError: - pass - - # is it union? - if getattr(annotation, "__origin__", None) is typing.Union: - # initial ordering is to make `string` the last deserialization option, because it is often the most generic - deserializers = [ - _get_deserialize_callable_from_annotation(arg, module, rf) - for arg in _sorted_annotations(annotation.__args__) # pyright: ignore - ] - - return functools.partial(_deserialize_with_union, deserializers) - - try: - if annotation._name == "Dict": # pyright: ignore - value_deserializer = _get_deserialize_callable_from_annotation( - annotation.__args__[1], module, rf # pyright: ignore - ) - - return functools.partial( - _deserialize_dict, - value_deserializer, - module, - ) - except (AttributeError, IndexError): - pass - try: - if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore - if len(annotation.__args__) > 1: # pyright: ignore - entry_deserializers = [ - _get_deserialize_callable_from_annotation(dt, module, rf) - for dt in annotation.__args__ # pyright: ignore - ] - return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) - deserializer = _get_deserialize_callable_from_annotation( - annotation.__args__[0], module, rf # pyright: ignore - ) - - return functools.partial(_deserialize_sequence, deserializer, module) - except (TypeError, IndexError, AttributeError, SyntaxError): - pass - - def _deserialize_default( - deserializer, - obj, - ): - if obj is None: - return obj - try: - return _deserialize_with_callable(deserializer, obj) - except Exception: - pass - return obj - - if get_deserializer(annotation, rf): - return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) - - return functools.partial(_deserialize_default, annotation) - - -def _deserialize_with_callable( - deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], - value: typing.Any, -): # pylint: disable=too-many-return-statements - try: - if value is None or isinstance(value, _Null): - return None - if isinstance(value, ET.Element): - if deserializer is str: - return value.text or "" - if deserializer is int: - return int(value.text) if value.text else None - if deserializer is float: - return float(value.text) if value.text else None - if 
deserializer is bool: - return value.text == "true" if value.text else None - if deserializer is None: - return value - if deserializer in [int, float, bool]: - return deserializer(value) - if isinstance(deserializer, CaseInsensitiveEnumMeta): - try: - return deserializer(value) - except ValueError: - # for unknown value, return raw value - return value - if isinstance(deserializer, type) and issubclass(deserializer, Model): - return deserializer._deserialize(value, []) - return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) - except Exception as e: - raise DeserializationError() from e - - -def _deserialize( - deserializer: typing.Any, - value: typing.Any, - module: typing.Optional[str] = None, - rf: typing.Optional["_RestField"] = None, - format: typing.Optional[str] = None, -) -> typing.Any: - if isinstance(value, PipelineResponse): - value = value.http_response.json() - if rf is None and format: - rf = _RestField(format=format) - if not isinstance(deserializer, functools.partial): - deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) - return _deserialize_with_callable(deserializer, value) - - -class _RestField: - def __init__( - self, - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - is_discriminator: bool = False, - visibility: typing.Optional[typing.List[str]] = None, - default: typing.Any = _UNSET, - format: typing.Optional[str] = None, - is_multipart_file_input: bool = False, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, - ): - self._type = type - self._rest_name_input = name - self._module: typing.Optional[str] = None - self._is_discriminator = is_discriminator - self._visibility = visibility - self._is_model = False - self._default = default - self._format = format - self._is_multipart_file_input = is_multipart_file_input - self._xml = xml if xml is not None else {} - - @property - def _class_type(self) -> typing.Any: - return getattr(self._type, "args", [None])[0] - - @property - def _rest_name(self) -> str: - if self._rest_name_input is None: - raise ValueError("Rest name was never set") - return self._rest_name_input - - def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin - # by this point, type and rest_name will have a value bc we default - # them in __new__ of the Model class - item = obj.get(self._rest_name) - if item is None: - return item - if self._is_model: - return item - return _deserialize(self._type, _serialize(item, self._format), rf=self) - - def __set__(self, obj: Model, value) -> None: - if value is None: - # we want to wipe out entries if users set attr to None - try: - obj.__delitem__(self._rest_name) - except KeyError: - pass - return - if self._is_model: - if not _is_model(value): - value = _deserialize(self._type, value) - obj.__setitem__(self._rest_name, value) - return - obj.__setitem__(self._rest_name, _serialize(value, self._format)) - - def _get_deserialize_callable_from_annotation( - self, annotation: typing.Any - ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: - return _get_deserialize_callable_from_annotation(annotation, self._module, self) - - -def rest_field( - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, - default: typing.Any = _UNSET, - format: typing.Optional[str] = None, - is_multipart_file_input: bool = False, - 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, -) -> typing.Any: - return _RestField( - name=name, - type=type, - visibility=visibility, - default=default, - format=format, - is_multipart_file_input=is_multipart_file_input, - xml=xml, - ) - - -def rest_discriminator( - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, -) -> typing.Any: - return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) - - -def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: - """Serialize a model to XML. - - :param Model model: The model to serialize. - :param bool exclude_readonly: Whether to exclude readonly properties. - :returns: The XML representation of the model. - :rtype: str - """ - return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore - - -def _get_element( - o: typing.Any, - exclude_readonly: bool = False, - parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, - wrapped_element: typing.Optional[ET.Element] = None, -) -> typing.Union[ET.Element, typing.List[ET.Element]]: - if _is_model(o): - model_meta = getattr(o, "_xml", {}) - - # if prop is a model, then use the prop element directly, else generate a wrapper of model - if wrapped_element is None: - wrapped_element = _create_xml_element( - model_meta.get("name", o.__class__.__name__), - model_meta.get("prefix"), - model_meta.get("ns"), - ) - - readonly_props = [] - if exclude_readonly: - readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] - - for k, v in o.items(): - # do not serialize readonly properties - if exclude_readonly and k in readonly_props: - continue - - prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) - if prop_rest_field: - prop_meta = getattr(prop_rest_field, "_xml").copy() - # use the wire name as xml name if no specific name is set - if prop_meta.get("name") is None: - prop_meta["name"] = k - else: - # additional properties will not have rest field, use the wire name as xml name - prop_meta = {"name": k} - - # if no ns for prop, use model's - if prop_meta.get("ns") is None and model_meta.get("ns"): - prop_meta["ns"] = model_meta.get("ns") - prop_meta["prefix"] = model_meta.get("prefix") - - if prop_meta.get("unwrapped", False): - # unwrapped could only set on array - wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) - elif prop_meta.get("text", False): - # text could only set on primitive type - wrapped_element.text = _get_primitive_type_value(v) - elif prop_meta.get("attribute", False): - xml_name = prop_meta.get("name", k) - if prop_meta.get("ns"): - ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore - xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore - # attribute should be primitive type - wrapped_element.set(xml_name, _get_primitive_type_value(v)) - else: - # other wrapped prop element - wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) - return wrapped_element - if isinstance(o, list): - return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore - if isinstance(o, dict): - result = [] - for k, v in o.items(): - result.append( - _get_wrapped_element( - v, - exclude_readonly, - { - "name": k, - "ns": parent_meta.get("ns") if parent_meta else None, - 
"prefix": parent_meta.get("prefix") if parent_meta else None, - }, - ) - ) - return result - - # primitive case need to create element based on parent_meta - if parent_meta: - return _get_wrapped_element( - o, - exclude_readonly, - { - "name": parent_meta.get("itemsName", parent_meta.get("name")), - "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), - "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), - }, - ) - - raise ValueError("Could not serialize value into xml: " + o) - - -def _get_wrapped_element( - v: typing.Any, - exclude_readonly: bool, - meta: typing.Optional[typing.Dict[str, typing.Any]], -) -> ET.Element: - wrapped_element = _create_xml_element( - meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None - ) - if isinstance(v, (dict, list)): - wrapped_element.extend(_get_element(v, exclude_readonly, meta)) - elif _is_model(v): - _get_element(v, exclude_readonly, meta, wrapped_element) - else: - wrapped_element.text = _get_primitive_type_value(v) - return wrapped_element - - -def _get_primitive_type_value(v) -> str: - if v is True: - return "true" - if v is False: - return "false" - if isinstance(v, _Null): - return "" - return str(v) - - -def _create_xml_element(tag, prefix=None, ns=None): - if prefix and ns: - ET.register_namespace(prefix, ns) - if ns: - return ET.Element("{" + ns + "}" + tag) - return ET.Element(tag) - - -def _deserialize_xml( - deserializer: typing.Any, - value: str, -) -> typing.Any: - element = ET.fromstring(value) # nosec - return _deserialize(deserializer, element) - - -def _convert_element(e: ET.Element): - # dict case - if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: - dict_result: typing.Dict[str, typing.Any] = {} - for child in e: - if dict_result.get(child.tag) is not None: - if isinstance(dict_result[child.tag], list): - dict_result[child.tag].append(_convert_element(child)) - else: - dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] - else: - dict_result[child.tag] = _convert_element(child) - dict_result.update(e.attrib) - return dict_result - # array case - if len(e) > 0: - array_result: typing.List[typing.Any] = [] - for child in e: - array_result.append(_convert_element(child)) - return array_result - # primitive case - return e.text diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/_patch.py deleted file mode 100644 index 3ca93bf89165..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_patch.py +++ /dev/null @@ -1,246 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
- -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -import uuid -from os import PathLike -from pathlib import Path -from typing import List, Any, Union, Dict -from typing_extensions import Self -from azure.core.credentials import TokenCredential -from azure.core import PipelineClient -from azure.core.pipeline import policies -from ._configuration import AzureAIClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations -from ._client import AzureAIClient as ClientGenerated -from .operations._patch import InferenceOperations - - -class AzureAIClient(ClientGenerated): - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config1 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com"], - **kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, - self._config1.http_logging_policy, - ] - self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - 
_policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, - self._config2.http_logging_policy, - ] - self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, - self._config3.http_logging_policy, - ] - self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - def close(self) -> None: - self._client1.close() - self._client2.close() - self._client3.close() - - def __enter__(self) -> Self: - self._client1.__enter__() - self._client2.__enter__() - self._client3.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client1.__exit__(*exc_details) - self._client2.__exit__(*exc_details) - self._client3.__exit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AzureAIClient": - """ - Create an AzureAIClient from a connection string. - - :param conn_str: The connection string, copied from your AI Studio project. 
- """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: - """Upload a file to the Azure AI Studio project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The asset id of uploaded file. - :rtype: str - """ - try: - from azure.ai.ml import MLClient - from azure.ai.ml.entities import Data - from azure.ai.ml.constants import AssetTypes - except ImportError: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") - - data = Data( - path=file_path, - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - - ml_client = MLClient( - self._config3.credential, - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - -__all__: List[str] = [ - "AzureAIClient", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py b/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py deleted file mode 100644 index 480e941d758f..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_serialization.py +++ /dev/null @@ -1,2114 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# -# -------------------------------------------------------------------------- - -# pyright: reportUnnecessaryTypeIgnoreComment=false - -from base64 import b64decode, b64encode -import calendar -import datetime -import decimal -import email -from enum import Enum -import json -import logging -import re -import sys -import codecs -from typing import ( - Dict, - Any, - cast, - Optional, - Union, - AnyStr, - IO, - Mapping, - Callable, - TypeVar, - MutableMapping, - Type, - List, -) - -try: - from urllib import quote # type: ignore -except ImportError: - from urllib.parse import quote -import xml.etree.ElementTree as ET - -import isodate # type: ignore - -from azure.core.exceptions import DeserializationError, SerializationError -from azure.core.serialization import NULL as CoreNull - -_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") - -ModelType = TypeVar("ModelType", bound="Model") -JSON = MutableMapping[str, Any] - - -class RawDeserializer: - - # Accept "text" because we're open minded people... - JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") - - # Name used in context - CONTEXT_NAME = "deserialized_data" - - @classmethod - def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: - """Decode data according to content-type. - - Accept a stream of data as well, but will be load at once in memory for now. - - If no content-type, will return the string version (not bytes, not stream) - - :param data: Input, could be bytes or stream (will be decoded with UTF8) or text - :type data: str or bytes or IO - :param str content_type: The content type. - :return: The deserialized data. - :rtype: object - """ - if hasattr(data, "read"): - # Assume a stream - data = cast(IO, data).read() - - if isinstance(data, bytes): - data_as_str = data.decode(encoding="utf-8-sig") - else: - # Explain to mypy the correct type. - data_as_str = cast(str, data) - - # Remove Byte Order Mark if present in string - data_as_str = data_as_str.lstrip(_BOM) - - if content_type is None: - return data - - if cls.JSON_REGEXP.match(content_type): - try: - return json.loads(data_as_str) - except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) from err - elif "xml" in (content_type or []): - try: - - try: - if isinstance(data, unicode): # type: ignore - # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string - data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore - except NameError: - pass - - return ET.fromstring(data_as_str) # nosec - except ET.ParseError as err: - # It might be because the server has an issue, and returned JSON with - # content-type XML.... - # So let's try a JSON load, and if it's still broken - # let's flow the initial exception - def _json_attemp(data): - try: - return True, json.loads(data) - except ValueError: - return False, None # Don't care about this one - - success, json_result = _json_attemp(data) - if success: - return json_result - # If i'm here, it's not JSON, it's not XML, let's scream - # and raise the last context in this block (the XML exception) - # The function hack is because Py2.7 messes up with exception - # context otherwise. 
- _LOGGER.critical("Wasn't XML not JSON, failing") - raise DeserializationError("XML is invalid") from err - elif content_type.startswith("text/"): - return data_as_str - raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) - - @classmethod - def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: - """Deserialize from HTTP response. - - Use bytes and headers to NOT use any requests/aiohttp or whatever - specific implementation. - Headers will tested for "content-type" - - :param bytes body_bytes: The body of the response. - :param dict headers: The headers of the response. - :returns: The deserialized data. - :rtype: object - """ - # Try to use content-type from headers if available - content_type = None - if "content-type" in headers: - content_type = headers["content-type"].split(";")[0].strip().lower() - # Ouch, this server did not declare what it sent... - # Let's guess it's JSON... - # Also, since Autorest was considering that an empty body was a valid JSON, - # need that test as well.... - else: - content_type = "application/json" - - if body_bytes: - return cls.deserialize_from_text(body_bytes, content_type) - return None - - -_LOGGER = logging.getLogger(__name__) - -try: - _long_type = long # type: ignore -except NameError: - _long_type = int - - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0. - - :param datetime.datetime dt: The datetime - :returns: The offset - :rtype: datetime.timedelta - """ - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation. - - :param datetime.datetime dt: The datetime - :returns: The timestamp representation - :rtype: str - """ - return "Z" - - def dst(self, dt): - """No daylight saving for UTC. - - :param datetime.datetime dt: The datetime - :returns: The daylight saving time - :rtype: datetime.timedelta - """ - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset) -> None: - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore - -_FLATTEN = re.compile(r"(? None: - self.additional_properties: Optional[Dict[str, Any]] = {} - for k in kwargs: # pylint: disable=consider-using-dict-items - if k not in self._attribute_map: - _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) - elif k in self._validation and self._validation[k].get("readonly", False): - _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) - else: - setattr(self, k, kwargs[k]) - - def __eq__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. 
- - :param object other: The object to compare - :returns: True if objects are equal - :rtype: bool - """ - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. - - :param object other: The object to compare - :returns: True if objects are not equal - :rtype: bool - """ - return not self.__eq__(other) - - def __str__(self) -> str: - return str(self.__dict__) - - @classmethod - def enable_additional_properties_sending(cls) -> None: - cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} - - @classmethod - def is_xml_model(cls) -> bool: - try: - cls._xml_map # type: ignore - except AttributeError: - return False - return True - - @classmethod - def _create_xml_node(cls): - """Create XML node. - - :returns: The XML node - :rtype: xml.etree.ElementTree.Element - """ - try: - xml_map = cls._xml_map # type: ignore - except AttributeError: - xml_map = {} - - return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) - - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: - """Return the JSON that would be sent to server from this model. - - This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, keep_readonly=keep_readonly, **kwargs - ) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, - **kwargs: Any - ) -> JSON: - """Return a dict that can be serialized using json.dump. - - Advanced usage might optionally use a callback as parameter: - - .. code::python - - def my_key_transformer(key, attr_desc, value): - return key - - Key is the attribute name used in Python. Attr_desc - is a dict of metadata. Currently contains 'type' with the - msrest type and 'key' with the RestAPI encoded key. - Value is the current value in this object. - - The string returned will be used to serialize the key. - If the return type is a list, this is considered hierarchical - result dict. - - See the three examples in this file: - - - attribute_transformer - - full_restapi_key_transformer - - last_restapi_key_transformer - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :param function key_transformer: A key transformer function. - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs - ) - - @classmethod - def _infer_class_models(cls): - try: - str_models = cls.__module__.rsplit(".", 1)[0] - models = sys.modules[str_models] - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - if cls.__name__ not in client_models: - raise ValueError("Not Autorest generated code") - except Exception: # pylint: disable=broad-exception-caught - # Assume it's not Autorest generated (tests?). 
Add ourselves as dependencies. - client_models = {cls.__name__: cls} - return client_models - - @classmethod - def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: - """Parse a str using the RestAPI syntax and return a model. - - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def from_dict( - cls: Type[ModelType], - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> ModelType: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) - - :param dict data: A dict using RestAPI structure - :param function key_extractors: A key extractor function. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - deserializer.key_extractors = ( # type: ignore - [ # type: ignore - attribute_key_case_insensitive_extractor, - rest_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - if key_extractors is None - else key_extractors - ) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def _flatten_subtype(cls, key, objects): - if "_subtype_map" not in cls.__dict__: - return {} - result = dict(cls._subtype_map[key]) - for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access - return result - - @classmethod - def _classify(cls, response, objects): - """Check the class _subtype_map for any child classes. - We want to ignore any inherited _subtype_maps. - Remove the polymorphic key from the initial data. - - :param dict response: The initial data - :param dict objects: The class objects - :returns: The class to be used - :rtype: class - """ - for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): - subtype_value = None - - if not isinstance(response, ET.Element): - rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] - subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) - else: - subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) - if subtype_value: - # Try to match base class. 
Can be class name only - # (bug to fix in Autorest to support x-ms-discriminator-name) - if cls.__name__ == subtype_value: - return cls - flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) - try: - return objects[flatten_mapping_type[subtype_value]] # type: ignore - except KeyError: - _LOGGER.warning( - "Subtype value %s has no mapping, use base class %s.", - subtype_value, - cls.__name__, - ) - break - else: - _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) - break - return cls - - @classmethod - def _get_rest_key_parts(cls, attr_key): - """Get the RestAPI key of this attr, split it and decode part - :param str attr_key: Attribute key must be in attribute_map. - :returns: A list of RestAPI part - :rtype: list - """ - rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) - return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] - - -def _decode_attribute_map_key(key): - """This decode a key in an _attribute_map to the actual key we want to look at - inside the received data. - - :param str key: A key string from the generated code - :returns: The decoded key - :rtype: str - """ - return key.replace("\\.", ".") - - -class Serializer(object): # pylint: disable=too-many-public-methods - """Request object model serializer.""" - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} - days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} - months = { - 1: "Jan", - 2: "Feb", - 3: "Mar", - 4: "Apr", - 5: "May", - 6: "Jun", - 7: "Jul", - 8: "Aug", - 9: "Sep", - 10: "Oct", - 11: "Nov", - 12: "Dec", - } - validation = { - "min_length": lambda x, y: len(x) < y, - "max_length": lambda x, y: len(x) > y, - "minimum": lambda x, y: x < y, - "maximum": lambda x, y: x > y, - "minimum_ex": lambda x, y: x <= y, - "maximum_ex": lambda x, y: x >= y, - "min_items": lambda x, y: len(x) < y, - "max_items": lambda x, y: len(x) > y, - "pattern": lambda x, y: not re.match(y, x, re.UNICODE), - "unique": lambda x, y: len(x) != len(set(x)), - "multiple": lambda x, y: x % y != 0, - } - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.serialize_type = { - "iso-8601": Serializer.serialize_iso, - "rfc-1123": Serializer.serialize_rfc, - "unix-time": Serializer.serialize_unix, - "duration": Serializer.serialize_duration, - "date": Serializer.serialize_date, - "time": Serializer.serialize_time, - "decimal": Serializer.serialize_decimal, - "long": Serializer.serialize_long, - "bytearray": Serializer.serialize_bytearray, - "base64": Serializer.serialize_base64, - "object": self.serialize_object, - "[]": self.serialize_iter, - "{}": self.serialize_dict, - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_transformer = full_restapi_key_transformer - self.client_side_validation = True - - def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals - self, target_obj, data_type=None, **kwargs - ): - """Serialize data into a string according to type. - - :param object target_obj: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, dict - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
- """ - key_transformer = kwargs.get("key_transformer", self.key_transformer) - keep_readonly = kwargs.get("keep_readonly", False) - if target_obj is None: - return None - - attr_name = None - class_name = target_obj.__class__.__name__ - - if data_type: - return self.serialize_data(target_obj, data_type, **kwargs) - - if not hasattr(target_obj, "_attribute_map"): - data_type = type(target_obj).__name__ - if data_type in self.basic_types.values(): - return self.serialize_data(target_obj, data_type, **kwargs) - - # Force "is_xml" kwargs if we detect a XML model - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) - - serialized = {} - if is_xml_model_serialization: - serialized = target_obj._create_xml_node() # pylint: disable=protected-access - try: - attributes = target_obj._attribute_map # pylint: disable=protected-access - for attr, attr_desc in attributes.items(): - attr_name = attr - if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access - attr_name, {} - ).get("readonly", False): - continue - - if attr_name == "additional_properties" and attr_desc["key"] == "": - if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) - continue - try: - - orig_attr = getattr(target_obj, attr) - if is_xml_model_serialization: - pass # Don't provide "transformer" for XML for now. Keep "orig_attr" - else: # JSON - keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) - keys = keys if isinstance(keys, list) else [keys] - - kwargs["serialization_ctxt"] = attr_desc - new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) - - if is_xml_model_serialization: - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - xml_prefix = xml_desc.get("prefix", None) - xml_ns = xml_desc.get("ns", None) - if xml_desc.get("attr", False): - if xml_ns: - ET.register_namespace(xml_prefix, xml_ns) - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - serialized.set(xml_name, new_attr) # type: ignore - continue - if xml_desc.get("text", False): - serialized.text = new_attr # type: ignore - continue - if isinstance(new_attr, list): - serialized.extend(new_attr) # type: ignore - elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, - # we MUST replace the tag with the local tag. But keeping the namespaces. 
- if "name" not in getattr(orig_attr, "_xml_map", {}): - splitted_tag = new_attr.tag.split("}") - if len(splitted_tag) == 2: # Namespace - new_attr.tag = "}".join([splitted_tag[0], xml_name]) - else: - new_attr.tag = xml_name - serialized.append(new_attr) # type: ignore - else: # That's a basic type - # Integrate namespace if necessary - local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) - local_node.text = str(new_attr) - serialized.append(local_node) # type: ignore - else: # JSON - for k in reversed(keys): # type: ignore - new_attr = {k: new_attr} - - _new_attr = new_attr - _serialized = serialized - for k in keys: # type: ignore - if k not in _serialized: - _serialized.update(_new_attr) # type: ignore - _new_attr = _new_attr[k] # type: ignore - _serialized = _serialized[k] - except ValueError as err: - if isinstance(err, SerializationError): - raise - - except (AttributeError, KeyError, TypeError) as err: - msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) - raise SerializationError(msg) from err - return serialized - - def body(self, data, data_type, **kwargs): - """Serialize data intended for a request body. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized request body - """ - - # Just in case this is a dict - internal_data_type_str = data_type.strip("[]{}") - internal_data_type = self.dependencies.get(internal_data_type_str, None) - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - if internal_data_type and issubclass(internal_data_type, Model): - is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) - else: - is_xml_model_serialization = False - if internal_data_type and not isinstance(internal_data_type, Enum): - try: - deserializer = Deserializer(self.dependencies) - # Since it's on serialization, it's almost sure that format is not JSON REST - # We're not able to deal with additional properties for now. - deserializer.additional_properties_detection = False - if is_xml_model_serialization: - deserializer.key_extractors = [ # type: ignore - attribute_key_case_insensitive_extractor, - ] - else: - deserializer.key_extractors = [ - rest_key_case_insensitive_extractor, - attribute_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access - except DeserializationError as err: - raise SerializationError("Unable to build a model: " + str(err)) from err - - return self._serialize(data, data_type, **kwargs) - - def url(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL path. - - :param str name: The name of the URL path parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :returns: The serialized URL path - :raises: TypeError if serialization fails. 
- :raises: ValueError if data is None - """ - try: - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - - if kwargs.get("skip_quote") is True: - output = str(output) - output = output.replace("{", quote("{")).replace("}", quote("}")) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return output - - def query(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL query. - - :param str name: The name of the query parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized query parameter - """ - try: - # Treat the list aside, since we don't want to encode the div separator - if data_type.startswith("["): - internal_data_type = data_type[1:-1] - do_quote = not kwargs.get("skip_quote", False) - return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) - - # Not a list, regular serialization - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - if kwargs.get("skip_quote") is True: - output = str(output) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def header(self, name, data, data_type, **kwargs): - """Serialize data intended for a request header. - - :param str name: The name of the header. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized header - """ - try: - if data_type in ["[str]"]: - data = ["" if d is None else d for d in data] - - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def serialize_data(self, data, data_type, **kwargs): - """Serialize generic data according to supplied data type. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
- :rtype: str, int, float, bool, dict, list
- """
- if data is None:
- raise ValueError("No value for given attribute")
-
- try:
- if data is CoreNull:
- return None
- if data_type in self.basic_types.values():
- return self.serialize_basic(data, data_type, **kwargs)
-
- if data_type in self.serialize_type:
- return self.serialize_type[data_type](data, **kwargs)
-
- # If dependencies is empty, try with current data class
- # It has to be a subclass of Enum anyway
- enum_type = self.dependencies.get(data_type, data.__class__)
- if issubclass(enum_type, Enum):
- return Serializer.serialize_enum(data, enum_obj=enum_type)
-
- iter_type = data_type[0] + data_type[-1]
- if iter_type in self.serialize_type:
- return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs)
-
- except (ValueError, TypeError) as err:
- msg = "Unable to serialize value: {!r} as type: {!r}."
- raise SerializationError(msg.format(data, data_type)) from err
- return self._serialize(data, **kwargs)
-
- @classmethod
- def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements
- custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type)
- if custom_serializer:
- return custom_serializer
- if kwargs.get("is_xml", False):
- return cls._xml_basic_types_serializers.get(data_type)
-
- @classmethod
- def serialize_basic(cls, data, data_type, **kwargs):
- """Serialize basic builtin data type.
- Serializes objects to str, int, float or bool.
-
- Possible kwargs:
- - basic_types_serializers dict[str, callable] : If set, use the callable as serializer
- - is_xml bool : If set, use xml_basic_types_serializers
-
- :param obj data: Object to be serialized.
- :param str data_type: Type of object in the iterable.
- :rtype: str, int, float, bool
- :return: serialized object
- """
- custom_serializer = cls._get_custom_serializers(data_type, **kwargs)
- if custom_serializer:
- return custom_serializer(data)
- if data_type == "str":
- return cls.serialize_unicode(data)
- return eval(data_type)(data) # nosec # pylint: disable=eval-used
-
- @classmethod
- def serialize_unicode(cls, data):
- """Special handling for serializing unicode strings in Py2.
- Encode to UTF-8 if unicode, otherwise handle as a str.
-
- :param str data: Object to be serialized.
- :rtype: str
- :return: serialized object
- """
- try: # If I received an enum, return its value
- return data.value
- except AttributeError:
- pass
-
- try:
- if isinstance(data, unicode): # type: ignore
- # Don't change it, JSON and XML ElementTree are totally able
- # to serialize correctly u'' strings
- return data
- except NameError:
- return str(data)
- return str(data)
-
- def serialize_iter(self, data, iter_type, div=None, **kwargs):
- """Serialize iterable.
-
- Supported kwargs:
- - serialization_ctxt dict : The current entry of _attribute_map, or same format.
- serialization_ctxt['type'] should be same as data_type.
- - is_xml bool : If set, serialize as XML
-
- :param list data: Object to be serialized.
- :param str iter_type: Type of object in the iterable.
- :param str div: If set, this str will be used to combine the elements
- in the iterable into a combined string. Defaults to None.
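The eval-based dispatch in serialize_basic above amounts to looking up a builtin by its name; a standalone sketch:

    # data_type names the builtin used for the cast ("str" is routed
    # through serialize_unicode instead, to handle enums).
    for data_type, value, expected in [("int", "42", 42), ("float", "1.5", 1.5), ("bool", 1, True)]:
        assert eval(data_type)(value) == expected  # nosec - data_type comes from the attribute map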
- :rtype: list, str - :return: serialized iterable - """ - if isinstance(data, str): - raise SerializationError("Refuse str type as a valid iter type.") - - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - is_xml = kwargs.get("is_xml", False) - - serialized = [] - for d in data: - try: - serialized.append(self.serialize_data(d, iter_type, **kwargs)) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized.append(None) - - if kwargs.get("do_quote", False): - serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] - - if div: - serialized = ["" if s is None else str(s) for s in serialized] - serialized = div.join(serialized) - - if "xml" in serialization_ctxt or is_xml: - # XML serialization is more complicated - xml_desc = serialization_ctxt.get("xml", {}) - xml_name = xml_desc.get("name") - if not xml_name: - xml_name = serialization_ctxt["key"] - - # Create a wrap node if necessary (use the fact that Element and list have "append") - is_wrapped = xml_desc.get("wrapped", False) - node_name = xml_desc.get("itemsName", xml_name) - if is_wrapped: - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - else: - final_result = [] - # All list elements to "local_node" - for el in serialized: - if isinstance(el, ET.Element): - el_node = el - else: - el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - if el is not None: # Otherwise it writes "None" :-p - el_node.text = str(el) - final_result.append(el_node) - return final_result - return serialized - - def serialize_dict(self, attr, dict_type, **kwargs): - """Serialize a dictionary of objects. - - :param dict attr: Object to be serialized. - :param str dict_type: Type of object in the dictionary. - :rtype: dict - :return: serialized dictionary - """ - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized[self.serialize_unicode(key)] = None - - if "xml" in serialization_ctxt: - # XML serialization is more complicated - xml_desc = serialization_ctxt["xml"] - xml_name = xml_desc["name"] - - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - for key, value in serialized.items(): - ET.SubElement(final_result, key).text = value - return final_result - - return serialized - - def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements - """Serialize a generic object. - This will be handled as a dictionary. If object passed in is not - a basic type (str, int, float, dict, list) it will simply be - cast to str. - - :param dict attr: Object to be serialized. 
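A standalone sketch of the div/do_quote combination in serialize_iter above, as used for comma-separated query parameters:

    from urllib.parse import quote

    values = ["red", "blue green", None]  # None becomes an empty element
    quoted = ["" if v is None else quote(str(v), safe="") for v in values]
    assert ",".join(quoted) == "red,blue%20green,"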
- :rtype: dict or str - :return: serialized object - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - return attr - obj_type = type(attr) - if obj_type in self.basic_types: - return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) - if obj_type is _long_type: - return self.serialize_long(attr) - if obj_type is str: - return self.serialize_unicode(attr) - if obj_type is datetime.datetime: - return self.serialize_iso(attr) - if obj_type is datetime.date: - return self.serialize_date(attr) - if obj_type is datetime.time: - return self.serialize_time(attr) - if obj_type is datetime.timedelta: - return self.serialize_duration(attr) - if obj_type is decimal.Decimal: - return self.serialize_decimal(attr) - - # If it's a model or I know this dependency, serialize as a Model - if obj_type in self.dependencies.values() or isinstance(attr, Model): - return self._serialize(attr) - - if obj_type == dict: - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) - except ValueError: - serialized[self.serialize_unicode(key)] = None - return serialized - - if obj_type == list: - serialized = [] - for obj in attr: - try: - serialized.append(self.serialize_object(obj, **kwargs)) - except ValueError: - pass - return serialized - return str(attr) - - @staticmethod - def serialize_enum(attr, enum_obj=None): - try: - result = attr.value - except AttributeError: - result = attr - try: - enum_obj(result) # type: ignore - return result - except ValueError as exc: - for enum_value in enum_obj: # type: ignore - if enum_value.value.lower() == str(attr).lower(): - return enum_value.value - error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) from exc - - @staticmethod - def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument - """Serialize bytearray into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - return b64encode(attr).decode() - - @staticmethod - def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument - """Serialize str into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - encoded = b64encode(attr).decode("ascii") - return encoded.strip("=").replace("+", "-").replace("/", "_") - - @staticmethod - def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Decimal object to float. - - :param decimal attr: Object to be serialized. - :rtype: float - :return: serialized decimal - """ - return float(attr) - - @staticmethod - def serialize_long(attr, **kwargs): # pylint: disable=unused-argument - """Serialize long (Py2) or int (Py3). - - :param int attr: Object to be serialized. - :rtype: int/long - :return: serialized long - """ - return _long_type(attr) - - @staticmethod - def serialize_date(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Date object into ISO-8601 formatted string. - - :param Date attr: Object to be serialized. - :rtype: str - :return: serialized date - """ - if isinstance(attr, str): - attr = isodate.parse_date(attr) - t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) - return t - - @staticmethod - def serialize_time(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Time object into ISO-8601 formatted string. - - :param datetime.time attr: Object to be serialized. 
- :rtype: str
- :return: serialized time
- """
- if isinstance(attr, str):
- attr = isodate.parse_time(attr)
- t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second)
- if attr.microsecond:
- t += ".{:06}".format(attr.microsecond)
- return t
-
- @staticmethod
- def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument
- """Serialize TimeDelta object into ISO-8601 formatted string.
-
- :param TimeDelta attr: Object to be serialized.
- :rtype: str
- :return: serialized duration
- """
- if isinstance(attr, str):
- attr = isodate.parse_duration(attr)
- return isodate.duration_isoformat(attr)
-
- @staticmethod
- def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument
- """Serialize Datetime object into RFC-1123 formatted string.
-
- :param Datetime attr: Object to be serialized.
- :rtype: str
- :raises: TypeError if format invalid.
- :return: serialized rfc
- """
- try:
- if not attr.tzinfo:
- _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
- utc = attr.utctimetuple()
- except AttributeError as exc:
- raise TypeError("RFC1123 object must be valid Datetime object.") from exc
-
- return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format(
- Serializer.days[utc.tm_wday],
- utc.tm_mday,
- Serializer.months[utc.tm_mon],
- utc.tm_year,
- utc.tm_hour,
- utc.tm_min,
- utc.tm_sec,
- )
-
- @staticmethod
- def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument
- """Serialize Datetime object into ISO-8601 formatted string.
-
- :param Datetime attr: Object to be serialized.
- :rtype: str
- :raises: SerializationError if format invalid.
- :return: serialized iso
- """
- if isinstance(attr, str):
- attr = isodate.parse_datetime(attr)
- try:
- if not attr.tzinfo:
- _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
- utc = attr.utctimetuple()
- if utc.tm_year > 9999 or utc.tm_year < 1:
- raise OverflowError("Hit max or min date")
-
- microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0")
- if microseconds:
- microseconds = "." + microseconds
- date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
- utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec
- )
- return date + microseconds + "Z"
- except (ValueError, OverflowError) as err:
- msg = "Unable to serialize datetime object."
- raise SerializationError(msg) from err
- except AttributeError as err:
- msg = "ISO-8601 object must be valid Datetime object."
- raise TypeError(msg) from err
-
- @staticmethod
- def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument
- """Serialize Datetime object into IntTime format.
- This is represented as seconds.
-
- :param Datetime attr: Object to be serialized.
- :rtype: int
- :raises: SerializationError if format invalid
- :return: serialized unix
- """
- if isinstance(attr, int):
- return attr
- try:
- if not attr.tzinfo:
- _LOGGER.warning("Datetime with no tzinfo will be considered UTC.")
- return int(calendar.timegm(attr.utctimetuple()))
- except AttributeError as exc:
- raise TypeError("Unix time object must be valid Datetime object.") from exc
-
-
-def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument
- key = attr_desc["key"]
- working_data = data
-
- while "."
in key: - # Need the cast, as for some reasons "split" is typed as list[str | Any] - dict_keys = cast(List[str], _FLATTEN.split(key)) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = working_data.get(working_key, data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - return working_data.get(key) - - -def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements - attr, attr_desc, data -): - key = attr_desc["key"] - working_data = data - - while "." in key: - dict_keys = _FLATTEN.split(key) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - if working_data: - return attribute_key_case_insensitive_extractor(key, None, working_data) - - -def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_extractor(dict_keys[-1], None, data) - - -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - This is the case insensitive version of "last_rest_key_extractor" - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) - - -def attribute_key_extractor(attr, _, data): - return data.get(attr) - - -def attribute_key_case_insensitive_extractor(attr, _, data): - found_key = None - lower_attr = attr.lower() - for key in data: - if lower_attr == key.lower(): - found_key = key - break - - return data.get(found_key) - - -def _extract_name_from_internal_type(internal_type): - """Given an internal type XML description, extract correct XML name with namespace. 
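A standalone sketch of what the extractor family above does with a flattened key. The module's _FLATTEN regex is defined earlier in this file; the pattern below assumes the same definition (split on unescaped dots):

    import re

    _flatten = re.compile(r"(?<!\\)\.")  # split on dots not preceded by a backslash
    data = {"properties": {"name": "my-endpoint"}}
    *parents, last = _flatten.split("properties.name")
    for key in parents:
        data = data.get(key, {})
    assert data.get(last) == "my-endpoint"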
-
- :param dict internal_type: A model type
- :rtype: str
- :returns: The XML name, qualified with its namespace if one is defined
- """
- internal_type_xml_map = getattr(internal_type, "_xml_map", {})
- xml_name = internal_type_xml_map.get("name", internal_type.__name__)
- xml_ns = internal_type_xml_map.get("ns", None)
- if xml_ns:
- xml_name = "{{{}}}{}".format(xml_ns, xml_name)
- return xml_name
-
-
-def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements
- if isinstance(data, dict):
- return None
-
- # Test if this model is XML ready first
- if not isinstance(data, ET.Element):
- return None
-
- xml_desc = attr_desc.get("xml", {})
- xml_name = xml_desc.get("name", attr_desc["key"])
-
- # Look for children
- is_iter_type = attr_desc["type"].startswith("[")
- is_wrapped = xml_desc.get("wrapped", False)
- internal_type = attr_desc.get("internalType", None)
- internal_type_xml_map = getattr(internal_type, "_xml_map", {})
-
- # Integrate namespace if necessary
- xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None))
- if xml_ns:
- xml_name = "{{{}}}{}".format(xml_ns, xml_name)
-
- # If it's an attribute, that's simple
- if xml_desc.get("attr", False):
- return data.get(xml_name)
-
- # If it's x-ms-text, that's simple too
- if xml_desc.get("text", False):
- return data.text
-
- # Scenario where I take the local name:
- # - Wrapped node
- # - Internal type is an enum (considered basic types)
- # - Internal type has no XML/Name node
- if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)):
- children = data.findall(xml_name)
- # If internal type has a local name and it's not a list, I use that name
- elif not is_iter_type and internal_type and "name" in internal_type_xml_map:
- xml_name = _extract_name_from_internal_type(internal_type)
- children = data.findall(xml_name)
- # That's an array
- else:
- if internal_type: # Complex type, ignore itemsName and use the complex type name
- items_name = _extract_name_from_internal_type(internal_type)
- else:
- items_name = xml_desc.get("itemsName", xml_name)
- children = data.findall(items_name)
-
- if len(children) == 0:
- if is_iter_type:
- if is_wrapped:
- return None # is_wrapped no node, we want None
- return [] # not wrapped, assume empty list
- return None # Assume it's not there, maybe an optional node.
-
- # If is_iter_type and not wrapped, return all found children
- if is_iter_type:
- if not is_wrapped:
- return children
- # Iter and wrapped, should have found one node only (the wrap one)
- if len(children) != 1:
- raise DeserializationError(
- "Tried to deserialize a wrapped array, but found several '{}' nodes. Maybe this array should not be declared as wrapped?".format( # pylint: disable=line-too-long
- xml_name
- )
- )
- return list(children[0]) # Might be empty list and that's ok.
-
- # Not an iter type here; we should have found exactly one element, or none
- if len(children) > 1:
- raise DeserializationError("Found several XML '{}' where only one was expected".format(xml_name))
- return children[0]
-
-
-class Deserializer(object):
- """Response object model deserializer.
-
- :param dict classes: Class type dictionary for deserializing complex types.
- :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
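A standalone illustration of the wrapped vs. flat XML array cases handled by xml_key_extractor above:

    import xml.etree.ElementTree as ET

    # Wrapped: one container node is found, and its children are the items.
    wrapped = ET.fromstring("<Colors><Color>red</Color><Color>blue</Color></Colors>")
    assert [el.text for el in list(wrapped)] == ["red", "blue"]

    # Not wrapped: the items are matched directly with findall().
    flat = ET.fromstring("<Root><Color>red</Color><Color>blue</Color></Root>")
    assert [el.text for el in flat.findall("Color")] == ["red", "blue"]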
- """ - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.deserialize_type = { - "iso-8601": Deserializer.deserialize_iso, - "rfc-1123": Deserializer.deserialize_rfc, - "unix-time": Deserializer.deserialize_unix, - "duration": Deserializer.deserialize_duration, - "date": Deserializer.deserialize_date, - "time": Deserializer.deserialize_time, - "decimal": Deserializer.deserialize_decimal, - "long": Deserializer.deserialize_long, - "bytearray": Deserializer.deserialize_bytearray, - "base64": Deserializer.deserialize_base64, - "object": self.deserialize_object, - "[]": self.deserialize_iter, - "{}": self.deserialize_dict, - } - self.deserialize_expected_types = { - "duration": (isodate.Duration, datetime.timedelta), - "iso-8601": (datetime.datetime), - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_extractors = [rest_key_extractor, xml_key_extractor] - # Additional properties only works if the "rest_key_extractor" is used to - # extract the keys. Making it to work whatever the key extractor is too much - # complicated, with no real scenario for now. - # So adding a flag to disable additional properties detection. This flag should be - # used if your expect the deserialization to NOT come from a JSON REST syntax. - # Otherwise, result are unexpected - self.additional_properties_detection = True - - def __call__(self, target_obj, response_data, content_type=None): - """Call the deserializer to process a REST response. - - :param str target_obj: Target data type to deserialize to. - :param requests.Response response_data: REST response object. - :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - data = self._unpack_content(response_data, content_type) - return self._deserialize(target_obj, data) - - def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements - """Call the deserializer on a model. - - Data needs to be already deserialized as JSON or XML ElementTree - - :param str target_obj: Target data type to deserialize to. - :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. 
- :rtype: object - """ - # This is already a model, go recursive just in case - if hasattr(data, "_attribute_map"): - constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] - try: - for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access - if attr in constants: - continue - value = getattr(data, attr) - if value is None: - continue - local_type = mapconfig["type"] - internal_data_type = local_type.strip("[]{}") - if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): - continue - setattr(data, attr, self._deserialize(local_type, value)) - return data - except AttributeError: - return - - response, class_name = self._classify_target(target_obj, data) - - if isinstance(response, str): - return self.deserialize_data(data, response) - if isinstance(response, type) and issubclass(response, Enum): - return self.deserialize_enum(data, response) - - if data is None or data is CoreNull: - return data - try: - attributes = response._attribute_map # type: ignore # pylint: disable=protected-access - d_attrs = {} - for attr, attr_desc in attributes.items(): - # Check empty string. If it's not empty, someone has a real "additionalProperties"... - if attr == "additional_properties" and attr_desc["key"] == "": - continue - raw_value = None - # Enhance attr_desc with some dynamic data - attr_desc = attr_desc.copy() # Do a copy, do not change the real one - internal_data_type = attr_desc["type"].strip("[]{}") - if internal_data_type in self.dependencies: - attr_desc["internalType"] = self.dependencies[internal_data_type] - - for key_extractor in self.key_extractors: - found_value = key_extractor(attr, attr_desc, data) - if found_value is not None: - if raw_value is not None and raw_value != found_value: - msg = ( - "Ignoring extracted value '%s' from %s for key '%s'" - " (duplicate extraction, follow extractors order)" - ) - _LOGGER.warning(msg, found_value, key_extractor, attr) - continue - raw_value = found_value - - value = self.deserialize_data(raw_value, attr_desc["type"]) - d_attrs[attr] = value - except (AttributeError, TypeError, KeyError) as err: - msg = "Unable to deserialize to object: " + class_name # type: ignore - raise DeserializationError(msg) from err - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) - - def _build_additional_properties(self, attribute_map, data): - if not self.additional_properties_detection: - return None - if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": - # Check empty string. If it's not empty, someone has a real "additionalProperties" - return None - if isinstance(data, ET.Element): - data = {el.tag: el.text for el in data} - - known_keys = { - _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) - for desc in attribute_map.values() - if desc["key"] != "" - } - present_keys = set(data.keys()) - missing_keys = present_keys - known_keys - return {key: data[key] for key in missing_keys} - - def _classify_target(self, target, data): - """Check to see whether the deserialization target object can - be classified into a subclass. - Once classification has been determined, initialize object. - - :param str target: The target object type to deserialize to. - :param str/dict data: The response data to deserialize. - :return: The classified target object and its class name. 
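A simplified sketch of the set arithmetic in _build_additional_properties above (the real code splits keys with _FLATTEN and decodes escaped dots):

    attribute_map = {"name": {"key": "name"}, "tier": {"key": "sku.tier"}}
    data = {"name": "standard", "sku": {"tier": "S1"}, "extra": 42}

    known_keys = {desc["key"].split(".")[0] for desc in attribute_map.values()}
    missing_keys = set(data) - known_keys
    assert {key: data[key] for key in missing_keys} == {"extra": 42}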
- :rtype: tuple - """ - if target is None: - return None, None - - if isinstance(target, str): - try: - target = self.dependencies[target] - except KeyError: - return target, target - - try: - target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access - except AttributeError: - pass # Target is not a Model, no classify - return target, target.__class__.__name__ # type: ignore - - def failsafe_deserialize(self, target_obj, data, content_type=None): - """Ignores any errors encountered in deserialization, - and falls back to not deserializing the object. Recommended - for use in error deserialization, as we want to return the - HttpResponseError to users, and not have them deal with - a deserialization error. - - :param str target_obj: The target object type to deserialize to. - :param str/dict data: The response data to deserialize. - :param str content_type: Swagger "produces" if available. - :return: Deserialized object. - :rtype: object - """ - try: - return self(target_obj, data, content_type=content_type) - except: # pylint: disable=bare-except - _LOGGER.debug( - "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True - ) - return None - - @staticmethod - def _unpack_content(raw_data, content_type=None): - """Extract the correct structure for deserialization. - - If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. - if we can't, raise. Your Pipeline should have a RawDeserializer. - - If not a pipeline response and raw_data is bytes or string, use content-type - to decode it. If no content-type, try JSON. - - If raw_data is something else, bypass all logic and return it directly. - - :param obj raw_data: Data to be processed. - :param str content_type: How to parse if raw_data is a string/bytes. - :raises JSONDecodeError: If JSON is requested and parsing is impossible. - :raises UnicodeDecodeError: If bytes is not UTF8 - :rtype: object - :return: Unpacked content. - """ - # Assume this is enough to detect a Pipeline Response without importing it - context = getattr(raw_data, "context", {}) - if context: - if RawDeserializer.CONTEXT_NAME in context: - return context[RawDeserializer.CONTEXT_NAME] - raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") - - # Assume this is enough to recognize universal_http.ClientResponse without importing it - if hasattr(raw_data, "body"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) - - # Assume this enough to recognize requests.Response without importing it. - if hasattr(raw_data, "_content_consumed"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) - - if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): - return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore - return raw_data - - def _instantiate_model(self, response, attrs, additional_properties=None): - """Instantiate a response model passing in deserialized args. - - :param Response response: The response model class. - :param dict attrs: The deserialized response attributes. - :param dict additional_properties: Additional properties to be set. - :rtype: Response - :return: The instantiated response model. 
- """ - if callable(response): - subtype = getattr(response, "_subtype_map", {}) - try: - readonly = [ - k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access - ] - const = [ - k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access - ] - kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} - response_obj = response(**kwargs) - for attr in readonly: - setattr(response_obj, attr, attrs.get(attr)) - if additional_properties: - response_obj.additional_properties = additional_properties - return response_obj - except TypeError as err: - msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore - raise DeserializationError(msg + str(err)) from err - else: - try: - for attr, value in attrs.items(): - setattr(response, attr, value) - return response - except Exception as exp: - msg = "Unable to populate response model. " - msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) from exp - - def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements - """Process data for deserialization according to data type. - - :param str data: The response string to be deserialized. - :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - if data is None: - return data - - try: - if not data_type: - return data - if data_type in self.basic_types.values(): - return self.deserialize_basic(data, data_type) - if data_type in self.deserialize_type: - if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): - return data - - is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment - "object", - "[]", - r"{}", - ] - if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: - return None - data_val = self.deserialize_type[data_type](data) - return data_val - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.deserialize_type: - return self.deserialize_type[iter_type](data, data_type[1:-1]) - - obj_type = self.dependencies[data_type] - if issubclass(obj_type, Enum): - if isinstance(data, ET.Element): - data = data.text - return self.deserialize_enum(data, obj_type) - - except (ValueError, TypeError, AttributeError) as err: - msg = "Unable to deserialize response data." - msg += " Data: {}, {}".format(data, data_type) - raise DeserializationError(msg) from err - return self._deserialize(obj_type, data) - - def deserialize_iter(self, attr, iter_type): - """Deserialize an iterable. - - :param list attr: Iterable to be deserialized. - :param str iter_type: The type of object in the iterable. - :return: Deserialized iterable. - :rtype: list - """ - if attr is None: - return None - if isinstance(attr, ET.Element): # If I receive an element here, get the children - attr = list(attr) - if not isinstance(attr, (list, set)): - raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) - return [self.deserialize_data(a, iter_type) for a in attr] - - def deserialize_dict(self, attr, dict_type): - """Deserialize a dictionary. - - :param dict/list attr: Dictionary to be deserialized. Also accepts - a list of key, value pairs. - :param str dict_type: The object type of the items in the dictionary. - :return: Deserialized dictionary. 
- :rtype: dict - """ - if isinstance(attr, list): - return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} - - if isinstance(attr, ET.Element): - # Transform value into {"Key": "value"} - attr = {el.tag: el.text for el in attr} - return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} - - def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements - """Deserialize a generic object. - This will be handled as a dictionary. - - :param dict attr: Dictionary to be deserialized. - :return: Deserialized object. - :rtype: dict - :raises: TypeError if non-builtin datatype encountered. - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - # Do no recurse on XML, just return the tree as-is - return attr - if isinstance(attr, str): - return self.deserialize_basic(attr, "str") - obj_type = type(attr) - if obj_type in self.basic_types: - return self.deserialize_basic(attr, self.basic_types[obj_type]) - if obj_type is _long_type: - return self.deserialize_long(attr) - - if obj_type == dict: - deserialized = {} - for key, value in attr.items(): - try: - deserialized[key] = self.deserialize_object(value, **kwargs) - except ValueError: - deserialized[key] = None - return deserialized - - if obj_type == list: - deserialized = [] - for obj in attr: - try: - deserialized.append(self.deserialize_object(obj, **kwargs)) - except ValueError: - pass - return deserialized - - error = "Cannot deserialize generic object with type: " - raise TypeError(error + str(obj_type)) - - def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements - """Deserialize basic builtin data type from string. - Will attempt to convert to str, int, float and bool. - This function will also accept '1', '0', 'true' and 'false' as - valid bool values. - - :param str attr: response string to be deserialized. - :param str data_type: deserialization data type. - :return: Deserialized basic type. - :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. - """ - # If we're here, data is supposed to be a basic type. - # If it's still an XML node, take the text - if isinstance(attr, ET.Element): - attr = attr.text - if not attr: - if data_type == "str": - # None or '', node is empty string. - return "" - # None or '', node with a strong type is None. - # Don't try to model "empty bool" or "empty int" - return None - - if data_type == "bool": - if attr in [True, False, 1, 0]: - return bool(attr) - if isinstance(attr, str): - if attr.lower() in ["true", "1"]: - return True - if attr.lower() in ["false", "0"]: - return False - raise TypeError("Invalid boolean value: {}".format(attr)) - - if data_type == "str": - return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec # pylint: disable=eval-used - - @staticmethod - def deserialize_unicode(data): - """Preserve unicode objects in Python 2, otherwise return data - as a string. - - :param str data: response string to be deserialized. - :return: Deserialized string. - :rtype: str or unicode - """ - # We might be here because we have an enum modeled as string, - # and we try to deserialize a partial dict with enum inside - if isinstance(data, Enum): - return data - - # Consider this is real string - try: - if isinstance(data, unicode): # type: ignore - return data - except NameError: - return str(data) - return str(data) - - @staticmethod - def deserialize_enum(data, enum_obj): - """Deserialize string into enum object. 
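The tolerant bool handling in deserialize_basic above, extracted as a standalone sketch:

    def parse_bool(attr):
        # Accept real bools, 0/1, and the strings "true"/"false"/"1"/"0".
        if attr in [True, False, 1, 0]:
            return bool(attr)
        if isinstance(attr, str):
            if attr.lower() in ["true", "1"]:
                return True
            if attr.lower() in ["false", "0"]:
                return False
        raise TypeError("Invalid boolean value: %r" % attr)

    assert parse_bool("True") is True and parse_bool(0) is False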
- - If the string is not a valid enum value it will be returned as-is - and a warning will be logged. - - :param str data: Response string to be deserialized. If this value is - None or invalid it will be returned as-is. - :param Enum enum_obj: Enum object to deserialize to. - :return: Deserialized enum object. - :rtype: Enum - """ - if isinstance(data, enum_obj) or data is None: - return data - if isinstance(data, Enum): - data = data.value - if isinstance(data, int): - # Workaround. We might consider remove it in the future. - try: - return list(enum_obj.__members__.values())[data] - except IndexError as exc: - error = "{!r} is not a valid index for enum {!r}" - raise DeserializationError(error.format(data, enum_obj)) from exc - try: - return enum_obj(str(data)) - except ValueError: - for enum_value in enum_obj: - if enum_value.value.lower() == str(data).lower(): - return enum_value - # We don't fail anymore for unknown value, we deserialize as a string - _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) - return Deserializer.deserialize_unicode(data) - - @staticmethod - def deserialize_bytearray(attr): - """Deserialize string into bytearray. - - :param str attr: response string to be deserialized. - :return: Deserialized bytearray - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return bytearray(b64decode(attr)) # type: ignore - - @staticmethod - def deserialize_base64(attr): - """Deserialize base64 encoded string into string. - - :param str attr: response string to be deserialized. - :return: Deserialized base64 string - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore - attr = attr + padding # type: ignore - encoded = attr.replace("-", "+").replace("_", "/") - return b64decode(encoded) - - @staticmethod - def deserialize_decimal(attr): - """Deserialize string into Decimal object. - - :param str attr: response string to be deserialized. - :return: Deserialized decimal - :raises: DeserializationError if string format invalid. - :rtype: decimal - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - return decimal.Decimal(str(attr)) # type: ignore - except decimal.DecimalException as err: - msg = "Invalid decimal {}".format(attr) - raise DeserializationError(msg) from err - - @staticmethod - def deserialize_long(attr): - """Deserialize string into long (Py2) or int (Py3). - - :param str attr: response string to be deserialized. - :return: Deserialized int - :rtype: long or int - :raises: ValueError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return _long_type(attr) # type: ignore - - @staticmethod - def deserialize_duration(attr): - """Deserialize ISO-8601 formatted string into TimeDelta object. - - :param str attr: response string to be deserialized. - :return: Deserialized duration - :rtype: TimeDelta - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - duration = isodate.parse_duration(attr) - except (ValueError, OverflowError, AttributeError) as err: - msg = "Cannot deserialize duration object." - raise DeserializationError(msg) from err - return duration - - @staticmethod - def deserialize_date(attr): - """Deserialize ISO-8601 formatted string into Date object. 
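A standalone sketch of the padding and alphabet restoration in deserialize_base64 above:

    from base64 import b64decode

    attr = "aGVsbG8"  # unpadded, url-safe input
    padding = "=" * (3 - (len(attr) + 3) % 4)
    assert b64decode((attr + padding).replace("-", "+").replace("_", "/")) == b"hello"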
-
- :param str attr: response string to be deserialized.
- :return: Deserialized date
- :rtype: Date
- :raises: DeserializationError if string format invalid.
- """
- if isinstance(attr, ET.Element):
- attr = attr.text
- if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
- raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
- # This must NOT use defaultmonth/defaultday. Using None ensures this raises an exception.
- return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
-
- @staticmethod
- def deserialize_time(attr):
- """Deserialize ISO-8601 formatted string into time object.
-
- :param str attr: response string to be deserialized.
- :return: Deserialized time
- :rtype: datetime.time
- :raises: DeserializationError if string format invalid.
- """
- if isinstance(attr, ET.Element):
- attr = attr.text
- if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore
- raise DeserializationError("Time must have only digits and separators. Received: %s" % attr)
- return isodate.parse_time(attr)
-
- @staticmethod
- def deserialize_rfc(attr):
- """Deserialize RFC-1123 formatted string into Datetime object.
-
- :param str attr: response string to be deserialized.
- :return: Deserialized RFC datetime
- :rtype: Datetime
- :raises: DeserializationError if string format invalid.
- """
- if isinstance(attr, ET.Element):
- attr = attr.text
- try:
- parsed_date = email.utils.parsedate_tz(attr) # type: ignore
- date_obj = datetime.datetime(
- *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
- )
- if not date_obj.tzinfo:
- date_obj = date_obj.astimezone(tz=TZ_UTC)
- except ValueError as err:
- msg = "Cannot deserialize to rfc datetime object."
- raise DeserializationError(msg) from err
- return date_obj
-
- @staticmethod
- def deserialize_iso(attr):
- """Deserialize ISO-8601 formatted string into Datetime object.
-
- :param str attr: response string to be deserialized.
- :return: Deserialized ISO datetime
- :rtype: Datetime
- :raises: DeserializationError if string format invalid.
- """
- if isinstance(attr, ET.Element):
- attr = attr.text
- try:
- attr = attr.upper() # type: ignore
- match = Deserializer.valid_date.match(attr)
- if not match:
- raise ValueError("Invalid datetime string: " + attr)
-
- check_decimal = attr.split(".")
- if len(check_decimal) > 1:
- decimal_str = ""
- for digit in check_decimal[1]:
- if digit.isdigit():
- decimal_str += digit
- else:
- break
- if len(decimal_str) > 6:
- attr = attr.replace(decimal_str, decimal_str[0:6])
-
- date_obj = isodate.parse_datetime(attr)
- test_utc = date_obj.utctimetuple()
- if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
- raise OverflowError("Hit max or min date")
- except (ValueError, OverflowError, AttributeError) as err:
- msg = "Cannot deserialize datetime object."
- raise DeserializationError(msg) from err
- return date_obj
-
- @staticmethod
- def deserialize_unix(attr):
- """Deserialize a unix timestamp (seconds since epoch) into a Datetime object.
-
- :param int attr: Unix timestamp to be deserialized.
- :return: Deserialized datetime
- :rtype: Datetime
- :raises: DeserializationError if format invalid
- """
- if isinstance(attr, ET.Element):
- attr = int(attr.text) # type: ignore
- try:
- attr = int(attr)
- date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
- except ValueError as err:
- msg = "Cannot deserialize to unix datetime object."
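# Editor's sketch of the fractional-second truncation in deserialize_iso
# above: Python datetimes carry at most 6 fractional digits, so a
# nanosecond-precision timestamp is trimmed before parsing, e.g.
#     "2024-05-01T12:00:00.123456789Z" -> "2024-05-01T12:00:00.123456Z"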
- raise DeserializationError(msg) from err - return date_obj diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_types.py b/sdk/ai/azure-ai-client/azure/ai/client/_types.py deleted file mode 100644 index c438829bda41..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_types.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING, Union - -if TYPE_CHECKING: - from . import models as _models - from .. import models as _models -AgentsApiResponseFormatOption = Union[ - str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" -] -MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] -AgentsApiToolChoiceOption = Union[str, str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py b/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py deleted file mode 100644 index e6f010934827..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_vendor.py +++ /dev/null @@ -1,50 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -import json -from typing import Any, Dict, IO, List, Mapping, Optional, Tuple, Union - -from ._model_base import Model, SdkJSONEncoder - - -# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` -FileContent = Union[str, bytes, IO[str], IO[bytes]] - -FileType = Union[ - # file (or bytes) - FileContent, - # (filename, file (or bytes)) - Tuple[Optional[str], FileContent], - # (filename, file (or bytes), content_type) - Tuple[Optional[str], FileContent, Optional[str]], -] - - -def serialize_multipart_data_entry(data_entry: Any) -> Any: - if isinstance(data_entry, (list, tuple, dict, Model)): - return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) - return data_entry - - -def prepare_multipart_form_data( - body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] -) -> Tuple[List[FileType], Dict[str, Any]]: - files: List[FileType] = [] - data: Dict[str, Any] = {} - for multipart_field in multipart_fields: - multipart_entry = body.get(multipart_field) - if isinstance(multipart_entry, list): - files.extend([(multipart_field, e) for e in multipart_entry]) - elif multipart_entry: - files.append((multipart_field, multipart_entry)) - - for data_field in data_fields: - data_entry = body.get(data_field) - if data_entry: - data[data_field] = serialize_multipart_data_entry(data_entry) - - return files, data diff --git a/sdk/ai/azure-ai-client/azure/ai/client/_version.py b/sdk/ai/azure-ai-client/azure/ai/client/_version.py deleted file mode 100644 index be71c81bd282..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py deleted file mode 100644 index 773c2c5dc6e9..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
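Assuming prepare_multipart_form_data is imported from azure.ai.client._vendor (shown above), a usage sketch of how it splits a body into repeated file parts and JSON-encoded data fields:

    body = {"file": [("a.txt", b"hi"), ("b.txt", b"yo")], "purpose": "assistants"}
    files, data = prepare_multipart_form_data(body, multipart_fields=["file"], data_fields=["purpose"])
    assert files == [("file", ("a.txt", b"hi")), ("file", ("b.txt", b"yo"))]
    assert data == {"purpose": "assistants"}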
-# --------------------------------------------------------------------------
-
-from ._client import AzureAIClient
-
-try:
- from ._patch import __all__ as _patch_all
- from ._patch import * # pylint: disable=unused-wildcard-import
-except ImportError:
- _patch_all = []
-from ._patch import patch_sdk as _patch_sdk
-
-__all__ = [
- "AzureAIClient",
-]
-__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore
-
-_patch_sdk()
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py
deleted file mode 100644
index 64cba8c69b6b..000000000000
--- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_client.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from copy import deepcopy
-from typing import Any, Awaitable, TYPE_CHECKING
-from typing_extensions import Self
-
-from azure.core import AsyncPipelineClient
-from azure.core.pipeline import policies
-from azure.core.rest import AsyncHttpResponse, HttpRequest
-
-from .._serialization import Deserializer, Serializer
-from ._configuration import AzureAIClientConfiguration
-from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
-
-if TYPE_CHECKING:
- from azure.core.credentials_async import AsyncTokenCredential
-
-
-class AzureAIClient:
- """AzureAIClient.
-
- :ivar agents: AgentsOperations operations
- :vartype agents: azure.ai.client.aio.operations.AgentsOperations
- :ivar connections: ConnectionsOperations operations
- :vartype connections: azure.ai.client.aio.operations.ConnectionsOperations
- :ivar evaluations: EvaluationsOperations operations
- :vartype evaluations: azure.ai.client.aio.operations.EvaluationsOperations
- :param endpoint: The Azure AI Studio project endpoint, in the form
- ``https://<region>.api.azureml.ms`` or
- ``https://<private-link-guid>.<region>.api.azureml.ms``\\\\ , where
- :code:`<region>` is the Azure region where the project is deployed (e.g. westus) and
- :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
- :type endpoint: str
- :param subscription_id: The Azure subscription ID. Required.
- :type subscription_id: str
- :param resource_group_name: The name of the Azure Resource Group. Required.
- :type resource_group_name: str
- :param project_name: The Azure AI Studio project name. Required.
- :type project_name: str
- :param credential: Credential used to authenticate requests to the service. Required.
- :type credential: ~azure.core.credentials_async.AsyncTokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-01-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long - self._config = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) - - def send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.AsyncHttpResponse
- """
-
- request_copy = deepcopy(request)
- path_format_arguments = {
- "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
- "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
- "resourceGroupName": self._serialize.url(
- "self._config.resource_group_name", self._config.resource_group_name, "str"
- ),
- "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
- }
-
- request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
- return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore
-
- async def close(self) -> None:
- await self._client.close()
-
- async def __aenter__(self) -> Self:
- await self._client.__aenter__()
- return self
-
- async def __aexit__(self, *exc_details: Any) -> None:
- await self._client.__aexit__(*exc_details)
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py
deleted file mode 100644
index 8356c07ba7ee..000000000000
--- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_configuration.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline import policies
-
-from .._version import VERSION
-
-if TYPE_CHECKING:
- from azure.core.credentials_async import AsyncTokenCredential
-
-
-class AzureAIClientConfiguration: # pylint: disable=too-many-instance-attributes
- """Configuration for AzureAIClient.
-
- Note that all parameters used to create this instance are saved as instance
- attributes.
-
- :param endpoint: The Azure AI Studio project endpoint, in the form
- ``https://<region>.api.azureml.ms`` or
- ``https://<private-link-guid>.<region>.api.azureml.ms``\\ , where :code:`<region>`
- is the Azure region where the project is deployed (e.g. westus) and :code:`<private-link-guid>`
- is the GUID of the Enterprise private link. Required.
- :type endpoint: str
- :param subscription_id: The Azure subscription ID. Required.
- :type subscription_id: str
- :param resource_group_name: The name of the Azure Resource Group. Required.
- :type resource_group_name: str
- :param project_name: The Azure AI Studio project name. Required.
- :type project_name: str
- :param credential: Credential used to authenticate requests to the service. Required.
- :type credential: ~azure.core.credentials_async.AsyncTokenCredential
- :keyword api_version: The API version to use for this operation. Default value is
- "2024-07-01-preview". Note that overriding this default value may result in unsupported
- behavior.
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any - ) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "ai-client/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py deleted file mode 100644 index 43bee04b4830..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/_patch.py +++ /dev/null @@ -1,200 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
- -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List, Any, TYPE_CHECKING -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies -from typing_extensions import Self - -from .._serialization import Deserializer, Serializer -from ._configuration import AzureAIClientConfiguration -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations -from ._client import AzureAIClient as ClientGenerated -from .operations._patch import InferenceOperations - -if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential - - -class AzureAIClient(ClientGenerated): - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any, - ) -> None: - # TODO: Validate input formats with regex match (e.g. subscription ID) - if not endpoint: - raise ValueError("endpoint is required") - if not subscription_id: - raise ValueError("subscription_id is required") - if not resource_group_name: - raise ValueError("resource_group_name is required") - if not project_name: - raise ValueError("project_name is required") - if not credential: - raise ValueError("credential is required") - if "api_version" in kwargs: - raise ValueError("No support for overriding the API version") - if "credential_scopes" in kwargs: - raise ValueError("No support for overriding the credential scopes") - - kwargs1 = kwargs.copy() - kwargs2 = kwargs.copy() - kwargs3 = kwargs.copy() - - # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config1 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", - credential_scopes=["https://management.azure.com"], - **kwargs1, - ) - _policies1 = kwargs1.pop("policies", None) - if _policies1 is None: - _policies1 = [ - policies.RequestIdPolicy(**kwargs1), - self._config1.headers_policy, - self._config1.user_agent_policy, - self._config1.proxy_policy, - policies.ContentDecodePolicy(**kwargs1), - self._config1.redirect_policy, - self._config1.retry_policy, - self._config1.authentication_policy, - self._config1.custom_hook_policy, - self._config1.logging_policy, - policies.DistributedTracingPolicy(**kwargs1), - policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, - self._config1.http_logging_policy, - ] - self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) - - # For Agents operations - _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config2 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com"], - **kwargs2, - ) - _policies2 = kwargs2.pop("policies", None) - if _policies2 is None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, -
self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, - self._config2.http_logging_policy, - ] - self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AzureAIClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://management.azure.com"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, - self._config3.http_logging_policy, - ] - self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - async def close(self) -> None: - await self._client1.close() - await self._client2.close() - await self._client3.close() - - async def __aenter__(self) -> Self: - await self._client1.__aenter__() - await self._client2.__aenter__() - await self._client3.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client1.__aexit__(*exc_details) - await self._client2.__aexit__(*exc_details) - await self._client3.__aexit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AzureAIClient": - """ - Create an asynchronous AzureAIClient from a connection string. - - :param conn_str: The connection string, copied from your AI Studio project. 
- """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - -__all__: List[str] = [ - "AzureAIClient", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py deleted file mode 100644 index 56224bae24a5..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._operations import AgentsOperations -from ._operations import ConnectionsOperations -from ._operations import EvaluationsOperations - -from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AgentsOperations", - "ConnectionsOperations", - "EvaluationsOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py deleted file mode 100644 index 201a6016dc89..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_operations.py +++ /dev/null @@ -1,6045 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from io import IOBase -import json -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload -import urllib.parse - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - StreamClosedError, - StreamConsumedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import _model_base, models as _models -from ..._model_base import SdkJSONEncoder, _deserialize -from ..._vendor import FileType, prepare_multipart_form_data -from ...operations._operations import ( - build_agents_cancel_run_request, - build_agents_cancel_vector_store_file_batch_request, - build_agents_create_agent_request, - build_agents_create_message_request, - build_agents_create_run_request, - build_agents_create_thread_and_run_request, - build_agents_create_thread_request, - build_agents_create_vector_store_file_batch_request, - build_agents_create_vector_store_file_request, - build_agents_create_vector_store_request, - build_agents_delete_agent_request, - build_agents_delete_file_request, - build_agents_delete_thread_request, - build_agents_delete_vector_store_file_request, - build_agents_delete_vector_store_request, - build_agents_get_agent_request, - build_agents_get_file_content_request, - build_agents_get_file_request, - build_agents_get_message_request, - build_agents_get_run_request, - build_agents_get_run_step_request, - build_agents_get_thread_request, - build_agents_get_vector_store_file_batch_request, - build_agents_get_vector_store_file_request, - build_agents_get_vector_store_request, - build_agents_list_agents_request, - build_agents_list_files_request, - build_agents_list_messages_request, - build_agents_list_run_steps_request, - build_agents_list_runs_request, - build_agents_list_vector_store_file_batch_files_request, - build_agents_list_vector_store_files_request, - build_agents_list_vector_stores_request, - build_agents_modify_vector_store_request, - build_agents_submit_tool_outputs_to_run_request, - build_agents_update_agent_request, - build_agents_update_message_request, - build_agents_update_run_request, - build_agents_update_thread_request, - build_agents_upload_file_request, - build_connections_get_request, - build_connections_list_request, - build_connections_list_secrets_request, - build_evaluations_create_or_replace_schedule_request, - build_evaluations_create_request, - build_evaluations_delete_schedule_request, - build_evaluations_get_request, - build_evaluations_get_schedule_request, - build_evaluations_list_request, - build_evaluations_list_schedule_request, - build_evaluations_update_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore - -if TYPE_CHECKING: - from ... 
import _types -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class AgentsOperations: # pylint: disable=too-many-public-methods - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.aio.AzureAIClient`'s - :attr:`agents` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - if model is _Unset: - raise TypeError("missing required argument: model") - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_agent_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if 
response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_agents( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfAgent. 
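To make the create_agent overloads above concrete, a short sketch using the keyword-only form; the model name is a placeholder for a deployment available in your project:

from azure.ai.client.aio import AzureAIClient

async def create_demo_agent(client: AzureAIClient):
    # Keyword-only form of create_agent; "gpt-4o" is a placeholder model/deployment name.
    agent = await client.agents.create_agent(
        model="gpt-4o",
        name="my-agent",
        instructions="You are a helpful assistant.",
        temperature=0.2,  # lower values give more focused, deterministic output
    )
    print(agent.id)
    return agent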
The OpenAIPageableListOfAgent is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) - - _request = build_agents_list_agents_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: Agent. 
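A pagination sketch for list_agents using the after cursor described above; it assumes OpenAIPageableListOfAgent carries the OpenAI-style paging fields data, has_more, and last_id:

from azure.ai.client.aio import AzureAIClient

async def print_all_agents(client: AzureAIClient) -> None:
    page = await client.agents.list_agents(limit=20, order="desc")
    while True:
        for agent in page.data:
            print(agent.id, agent.name)
        if not page.has_more:
            break
        # Pass the last seen ID as the `after` cursor to fetch the next page.
        page = await client.agents.list_agents(limit=20, order="desc", after=page.last_id)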
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - _request = build_agents_get_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_agent( - self, - assistant_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_agent_request( - assistant_id=assistant_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. 
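Tying update_agent and delete_agent together, a hedged sketch; the deleted field on AgentDeletionStatus is assumed here, mirroring the OpenAI deletion-status shape:

from azure.ai.client.aio import AzureAIClient

async def rename_then_delete(client: AzureAIClient, assistant_id: str) -> None:
    agent = await client.agents.update_agent(assistant_id, name="renamed-agent")
    status = await client.agents.delete_agent(agent.id)
    print(status.deleted)  # assumed boolean flag on AgentDeletionStatus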
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread( - self, - *, - content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. 
The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_thread( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: - """Gets information about an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: AgentThread. 
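A sketch of seeding a new thread with an initial user message and reading it back; passing a plain mapping for ThreadMessageOptions is an assumption justified by the models being documented as MutableMapping-compatible:

from azure.ai.client.aio import AzureAIClient

async def seed_thread(client: AzureAIClient):
    thread = await client.agents.create_thread(
        messages=[{"role": "user", "content": "Hello, agent!"}],  # ThreadMessageOptions-shaped mapping
        metadata={"session": "demo"},
    )
    fetched = await client.agents.get_thread(thread.id)
    print(fetched.id == thread.id)
    return thread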
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - _request = build_agents_get_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_thread( - self, - thread_id: str, - *, - content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. 
Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_thread( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_thread( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_thread_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: - """Deletes an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: ThreadDeletionStatus. 
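And the corresponding update/delete pair for threads; the metadata limits (16 pairs, 64-character keys, 512-character values) come from the docstrings above, and the deleted field on ThreadDeletionStatus is an assumption:

from azure.ai.client.aio import AzureAIClient

async def tag_then_delete_thread(client: AzureAIClient, thread_id: str) -> None:
    thread = await client.agents.update_thread(thread_id, metadata={"stage": "triage"})
    status = await client.agents.delete_thread(thread.id)
    print(status.deleted)  # assumed boolean flag on ThreadDeletionStatus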
The ThreadDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_message( - self, - thread_id: str, - *, - role: Union[str, _models.MessageRole], - content: str, - content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.client.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. - :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_message( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_message( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.client.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. 
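# Usage sketch for the keyword overload of create_message documented above,
# assuming the same AzureAIClient / "agents" group as in the earlier sketch.
from azure.ai.client.aio import AzureAIClient


async def add_user_message(client: AzureAIClient, thread_id: str) -> str:
    # Only plain text can be supplied here; role is "user" or "assistant".
    message = await client.agents.create_message(
        thread_id=thread_id,
        role="user",
        content="What did the quarterly report conclude?",
        metadata={"source": "docs-sample"},
    )
    return message.id  # ThreadMessage carries the new message's identifier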
- :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - if role is _Unset: - raise TypeError("missing required argument: role") - if content is _Unset: - raise TypeError("missing required argument: content") - body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_message_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_messages( - self, - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadMessage: - """Gets a list of 
messages that exist on a thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword run_id: Filter messages by the run ID that generated them. Default value is None. - :paramtype run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible - with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_list_messages_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore 
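# Paging sketch for list_messages: `after` is the cursor documented above. The
# `data`, `has_more`, and `last_id` fields on the pageable result follow the
# OpenAI-style list shape and are assumptions here.
async def iter_all_messages(client, thread_id: str):
    after = None
    while True:
        page = await client.agents.list_messages(
            thread_id=thread_id, limit=100, order="asc", after=after
        )
        for message in page.data:
            yield message
        if not page.has_more:  # stop once the service reports no further pages
            break
        after = page.last_id   # cursor for the next request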
- - return deserialized # type: ignore - - @distributed_trace_async - async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: - """Gets an existing message from an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_get_message_request( - thread_id=thread_id, - message_id=message_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. 
- - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_message( - self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_message( - self, - thread_id: str, - message_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_message_request( - thread_id=thread_id, - message_id=message_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
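# Sketch for update_message: per the docstrings above, only `metadata` can be
# modified on an existing message. Client shape as in the earlier sketches.
async def mark_reviewed(client, thread_id: str, message_id: str):
    return await client.agents.update_message(
        thread_id=thread_id,
        message_id=message_id,
        metadata={"reviewed": "true"},  # values are strings, <= 512 chars
    )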
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether, and which, tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_run_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_runs( - self, - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadRun: - """Gets a list of runs for a specified thread. - - :param thread_id: Identifier of the thread. Required. 
- :type thread_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_list_runs_request( - thread_id=thread_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Gets an existing run from an existing thread. 
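# Sketch: start a run with create_run, then poll get_run until it leaves the
# queued/in-progress states. The status strings mirror the OpenAI Assistants
# run lifecycle and are an assumption here, not taken from this file.
import asyncio

TERMINAL_STATUSES = {"completed", "failed", "cancelled", "expired"}


async def run_and_wait(client, thread_id: str, assistant_id: str):
    run = await client.agents.create_run(
        thread_id=thread_id,
        assistant_id=assistant_id,
        temperature=0.2,  # optional per-run sampling override
    )
    while run.status not in TERMINAL_STATUSES and run.status != "requires_action":
        await asyncio.sleep(1)  # coarse polling interval
        run = await client.agents.get_run(thread_id=thread_id, run_id=run.id)
    return run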
- - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_get_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. 
- :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if tool_outputs is _Unset: - raise TypeError("missing required argument: tool_outputs") - body = {"stream": stream_parameter, "tool_outputs": tool_outputs} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_submit_tool_outputs_to_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Cancels an in-progress run of a thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun.
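# Sketch: answering a run in the 'requires_action' state via
# submit_tool_outputs_to_run. ToolOutput comes from this package's models; the
# required_action.submit_tool_outputs.tool_calls traversal and the
# call.function.arguments field follow the Assistants wire shape and are
# assumptions here.
import json

from azure.ai.client.models import ToolOutput


async def answer_tool_calls(client, thread_id: str, run):
    outputs = []
    for call in run.required_action.submit_tool_outputs.tool_calls:
        arguments = json.loads(call.function.arguments)
        # Dispatch to your own tool implementation; this sketch echoes the
        # parsed arguments back as the tool's output.
        outputs.append(ToolOutput(tool_call_id=call.id, output=json.dumps(arguments)))
    return await client.agents.submit_tool_outputs_to_run(
        thread_id=thread_id, run_id=run.id, tool_outputs=outputs
    )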
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_cancel_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread_and_run( - self, - *, - assistant_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread_and_run( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_thread_and_run( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "thread": thread, - "tool_choice": tool_choice, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_and_run_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: - """Gets a single run step from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param step_id: Identifier of the run step. Required. - :type step_id: str - :return: RunStep. 
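As above, a hedged sketch of the combined create-and-run call, reusing a `client` constructed as in the earlier example; the `agents` attribute and the agent ID are assumptions.

    async def start_run(client, agent_id: str):
        # One round trip: creates an empty thread and starts a run on it,
        # overriding instructions and sampling for this run only.
        run = await client.agents.create_thread_and_run(
            assistant_id=agent_id,
            instructions="Answer in one short sentence.",
            temperature=0.2,
        )
        return run  # caller polls run.status until it reaches a terminal state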
The RunStep is compatible with MutableMapping - :rtype: ~azure.ai.client.models.RunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - - _request = build_agents_get_run_step_request( - thread_id=thread_id, - run_id=run_id, - step_id=step_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.RunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_run_steps( - self, - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfRunStep: - """Gets a list of run steps from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - - _request = build_agents_list_run_steps_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_files( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: - """Gets a list of previously uploaded files. - - :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is - None. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :return: FileListResponse. 
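A sketch of cursor pagination with `list_run_steps`, reusing the `client` from the first example. It assumes the pageable model exposes `data`, `has_more`, and `last_id`, as its OpenAI-style paging shape suggests; those field names are assumptions.

    async def print_run_steps(client, thread_id: str, run_id: str) -> None:
        cursor = None
        while True:
            page = await client.agents.list_run_steps(
                thread_id=thread_id, run_id=run_id, limit=20, after=cursor
            )
            for step in page.data:
                print(step.id, step.type)
            if not page.has_more:
                break
            cursor = page.last_id  # resume after the last object seen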
The FileListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.FileListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - - _request = build_agents_list_files_request( - purpose=purpose, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileListResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.client._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
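A short sketch for `list_files`, filtering by one of the documented known purpose values ("assistants"); the `data` field name on `FileListResponse` is an assumption.

    async def show_agent_files(client) -> None:
        files = await client.agents.list_files(purpose="assistants")
        for f in files.data:
            print(f.id, f.filename)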
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file( - self, - body: JSON = _Unset, - *, - file: FileType = _Unset, - purpose: Union[str, _models.FilePurpose] = _Unset, - filename: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Is one of the following types: JSON Required. - :type body: JSON - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.client._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file is _Unset: - raise TypeError("missing required argument: file") - if purpose is _Unset: - raise TypeError("missing required argument: purpose") - body = {"file": file, "filename": filename, "purpose": purpose} - body = {k: v for k, v in body.items() if v is not None} - _body = body.as_dict() if isinstance(body, _model_base.Model) else body - _file_fields: List[str] = ["file"] - _data_fields: List[str] = ["purpose", "filename"] - _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) - - _request = build_agents_upload_file_request( - api_version=self._config.api_version, - files=_files, - data=_data, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() 
- else: - deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: - """Delete a previously uploaded file. - - :param file_id: The ID of the file to delete. Required. - :type file_id: str - :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.client.models.FileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: - """Returns information about a specific file. Does not retrieve file content. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: OpenAIFile. 
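A sketch pairing `upload_file` with `delete_file`, assuming a binary file handle satisfies the `FileType` parameter and that the returned `OpenAIFile` and `FileDeletionStatus` expose `id` and `deleted` respectively (assumptions).

    async def upload_then_delete(client, path: str) -> None:
        with open(path, "rb") as fh:
            uploaded = await client.agents.upload_file(
                file=fh, purpose="assistants", filename="notes.txt"  # illustrative name
            )
        status = await client.agents.delete_file(uploaded.id)
        print(status.deleted)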
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - _request = build_agents_get_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: - """Retrieves the content of a specific, previously uploaded file. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: FileContentResponse.
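For symmetry, a metadata lookup sketch with `get_file`; the `OpenAIFile` field names (`filename`, `purpose`, `bytes`) are assumed from the OpenAI file object shape.

    async def describe_file(client, file_id: str) -> None:
        info = await client.agents.get_file(file_id)
        print(info.filename, info.purpose, info.bytes)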
The FileContentResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.FileContentResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) - - _request = build_agents_get_file_content_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileContentResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_stores( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStore: - """Returns a list of vector stores. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStore. 
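And a companion sketch for `get_file_content`, which, unlike `get_file`, returns the payload itself; the shape of `FileContentResponse` is left opaque here.

    async def fetch_file_content(client, file_id: str):
        content = await client.agents.get_file_content(file_id)
        return content  # FileContentResponse; inspect the model for the byte payload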
The OpenAIPageableListOfVectorStore is compatible - with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_stores_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. 
- :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: - """Returns the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStore. 
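A sketch of `create_vector_store` over files uploaded earlier, reusing the assumed `client.agents` surface; the store name and metadata are illustrative.

    from typing import List

    async def make_vector_store(client, file_ids: List[str]):
        store = await client.agents.create_vector_store(
            file_ids=file_ids,             # files the file_search tool may read
            name="docs-store",             # illustrative
            metadata={"owner": "sample"},  # illustrative
        )
        return store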
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def modify_vector_store( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def modify_vector_store( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None.
- :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def modify_vector_store( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def modify_vector_store( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore.
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"expires_after": expires_after, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_modify_vector_store_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: - """Deletes the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStoreDeletionStatus. 
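A combined sketch for `modify_vector_store` and `delete_vector_store`, assuming `VectorStore.id` and `VectorStoreDeletionStatus.deleted` exist as the model names imply.

    async def rename_then_delete(client, vector_store_id: str) -> None:
        store = await client.agents.modify_vector_store(vector_store_id, name="archived-docs")
        status = await client.agents.delete_vector_store(store.id)
        print(status.deleted)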
The VectorStoreDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_store_files( - self, - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
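The matching teardown for `delete_vector_store`, under the same assumptions as the sketch above. Note from the docstring that the return value is a deletion-status model, not the deleted store:

async def drop_store(client, vector_store_id: str) -> None:
    # Returns a VectorStoreDeletionStatus model rather than the store itself.
    status = await client.agents.delete_vector_store(vector_store_id)
    print(status)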
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_files_request( - vector_store_id=vector_store_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
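A sketch of the cursor pagination that the `after`/`before` keywords describe: feed the last seen ID back in until no more pages remain. The `data`, `has_more`, and `last_id` fields are assumptions based on the OpenAIPageableListOfVectorStoreFile model name, not confirmed by this patch:

async def print_store_files(client, vector_store_id: str) -> None:
    # Walk forward through the list 100 items at a time.
    after = None
    while True:
        page = await client.agents.list_vector_store_files(
            vector_store_id, limit=100, order="asc", after=after
        )
        for item in page.data:
            print(item.id)
        if not page.has_more:
            break
        after = page.last_id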
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file( - self, - vector_store_id: str, - *, - file_id: str, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_id: str = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: - """Retrieves a vector store file. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFile. 
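A short sketch for `create_vector_store_file`, same assumptions as above; both IDs are placeholders:

async def attach_file(client, vector_store_id: str, file_id: str):
    # Attach an already-uploaded file; omitting chunking_strategy lets the
    # service fall back to its auto strategy, per the docstring above.
    return await client.agents.create_vector_store_file(
        vector_store_id, file_id=file_id
    )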
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_vector_store_file( - self, vector_store_id: str, file_id: str, **kwargs: Any - ) -> _models.VectorStoreFileDeletionStatus: - """Delete a vector store file. This will remove the file from the vector store but the file itself - will not be deleted. - To delete the file, use the delete file endpoint. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFileDeletionStatus. 
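A polling sketch built on `get_vector_store_file`. The `status` attribute and the "in_progress" value are assumptions borrowed from the VectorStoreFileStatusFilter values documented for the list operation:

import asyncio

async def wait_for_file(client, vector_store_id: str, file_id: str):
    # Poll until ingestion settles in a terminal state.
    while True:
        vs_file = await client.agents.get_vector_store_file(vector_store_id, file_id)
        if vs_file.status != "in_progress":
            return vs_file
        await asyncio.sleep(2)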
The VectorStoreFileDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch( - self, - vector_store_id: str, - *, - file_ids: List[str], - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
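A sketch for the file-delete call above, which detaches rather than destroys:

async def detach_file(client, vector_store_id: str, file_id: str) -> None:
    # Removes the association only; per the docstring above, the underlying
    # file survives and must be removed via the separate file-delete endpoint.
    await client.agents.delete_vector_store_file(vector_store_id, file_id)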
- :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file_batch( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: List[str] = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_batch_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Retrieve a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
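The batch variant, same assumptions as the single-file sketch; one call ingests several already-uploaded files:

async def attach_batch(client, vector_store_id: str, file_ids: list):
    # As with single files, leaving chunking_strategy unset selects the
    # auto strategy.
    return await client.agents.create_vector_store_file_batch(
        vector_store_id, file_ids=file_ids
    )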
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def cancel_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch - as soon as possible. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_cancel_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_store_file_batch_files( - self, - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files in a batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
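A poll-with-deadline sketch that combines `get_vector_store_file_batch` and `cancel_vector_store_file_batch` above. As before, the `status` attribute and "in_progress" value are assumptions carried over from the documented status filter:

import asyncio

async def await_batch(client, vector_store_id: str, batch_id: str, timeout: float = 300.0):
    # Poll the batch; give up by cancelling once the deadline passes.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while True:
        batch = await client.agents.get_vector_store_file_batch(vector_store_id, batch_id)
        if batch.status != "in_progress":
            return batch
        if loop.time() >= deadline:
            return await client.agents.cancel_vector_store_file_batch(
                vector_store_id, batch_id
            )
        await asyncio.sleep(2)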
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_file_batch_files_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.aio.AzureAIClient`'s - :attr:`connections` attribute. 
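A sketch that applies the documented status filter to the batch-files listing; the `data` field is assumed, as in the earlier paging sketch:

async def failed_batch_files(client, vector_store_id: str, batch_id: str):
    # Narrow the listing to failures only.
    page = await client.agents.list_vector_store_file_batch_files(
        vector_store_id, batch_id, filter="failed"
    )
    return [item.id for item in page.data]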
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def _list( # pylint: disable=protected-access - self, - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any - ) -> _models._models.ConnectionsListResponse: - """List the details of all the connections (not including their credentials). - - :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. - :paramtype category: str or ~azure.ai.client.models.ConnectionType - :keyword include_all: Indicates whether to list datastores. Service default: do not list - datastores. Default value is None. - :paramtype include_all: bool - :keyword target: Target of the workspace connection. Default value is None. - :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - - _request = build_connections_list_request( - category=category, - include_all=include_all, - target=target, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def _get( # pylint: 
disable=protected-access - self, connection_name: str, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, without credentials. - - :param connection_name: Connection Name. Required. - :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - _request = build_connections_get_request( - connection_name=connection_name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - async def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - async def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - - @distributed_trace_async - async def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credentials (if available). 
- - :param connection_name: Connection Name. Required. - :type connection_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. - :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - if body is _Unset: - if ignored is _Unset: - raise TypeError("missing required argument: ignored") - body = {"ignored": ignored} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_connections_list_secrets_request( - connection_name=connection_name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class EvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.aio.AzureAIClient`'s - :attr:`evaluations` attribute. 
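All three connection operations in this class are generated as private (`_list`, `_get`, `_list_secrets`), which suggests the public surface is re-exposed through the hand-written `_patch` layer elsewhere in this patch series. Purely to illustrate the generated signatures (response field names are unknown, so the sketch only prints the models):

async def dump_connection(client, connection_name: str) -> None:
    # Underscore-prefixed generated methods are internal; real callers
    # should go through whatever public wrappers _patch exposes. The
    # `ignored` keyword is required by the current contract, as the TODO
    # in the docstring notes.
    details = await client.connections._get(connection_name)
    secrets = await client.connections._list_secrets(connection_name, ignored="x")
    print(details, secrets)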
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: ~azure.ai.client.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. 
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluation, (IOBase, bytes)): - _content = evaluation - else: - _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> AsyncIterable["_models.Evaluation"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = 
pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @overload - async def update( - self, - id: str, - resource: _models.Evaluation, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.client.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update( - self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type resource: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. 
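A sketch of creating an evaluation and then paging through the list. A plain dict payload keeps the sketch free of guessed model fields; its contents are placeholders:

async def run_and_list(client, evaluation_payload: dict) -> None:
    # create() accepts an Evaluation model, a JSON mapping, or raw bytes.
    created = await client.evaluations.create(evaluation_payload)
    print(created)
    # list() returns an AsyncItemPaged; the prepare_request/extract_data
    # plumbing above follows `nextLink` transparently, so callers just iterate.
    async for evaluation in client.evaluations.list(top=20):
        print(evaluation)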
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_update_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_or_replace_schedule( - self, id: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.client.models.EvaluationSchedule - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_replace_schedule( - self, id: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_replace_schedule( - self, id: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_replace_schedule( - self, id: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: EvaluationSchedule, - JSON, IO[bytes] Required. - :type resource: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_or_replace_schedule_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except 
(StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_schedule( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> AsyncIterable["_models.EvaluationSchedule"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of EvaluationSchedule - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.client.models.EvaluationSchedule] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def 
extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @distributed_trace_async - async def delete_schedule(self, id: str, **kwargs: Any) -> None: - """Resource delete operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_evaluations_delete_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py deleted file mode 100644 index 5f84899dcf9c..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/aio/operations/_patch.py +++ /dev/null @@ -1,1977 +0,0 @@ -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
-
-Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
-"""
-from ..._vendor import FileType
-import asyncio
-import io
-import logging
-import os
-import time
-from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload
-
-from azure.ai.client import _types
-from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
-from ._operations import AgentsOperations as AgentsOperationsGenerated
-from ...models._patch import ConnectionProperties
-from ...models._enums import AuthenticationType, ConnectionType, FilePurpose
-from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse
-from ... import models as _models
-from azure.core.tracing.decorator_async import distributed_trace_async
-
-logger = logging.getLogger(__name__)
-
-JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
-_Unset: Any = object()
-
-
-class InferenceOperations:
-
-    def __init__(self, outer_instance):
-        self.outer_instance = outer_instance
-
-    @distributed_trace_async
-    async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
-        """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Chat Completions AI model deployment.
-        The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
-
-        :return: An authenticated chat completions client
-        :rtype: ~azure.ai.inference.aio.ChatCompletionsClient
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = await self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
-
-        try:
-            from azure.ai.inference.aio import ChatCompletionsClient
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError(
-                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
-            )
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
-            )
-            from azure.core.credentials import AzureKeyCredential
-
-            client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
-            )
-        elif connection.authentication_type == AuthenticationType.AAD:
-            # MaaS models do not yet support EntraID auth
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
-            )
-            client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            # TODO - Not yet supported by the service. Expected 9/27.
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
-            )
-            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
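A minimal usage sketch for the helper above (illustrative only, not part of this patch: `ai_client` stands for an `AzureAIClient` assumed to expose this class as `ai_client.inference`, and a Serverless connection with a chat model deployment is assumed to exist):

    import asyncio

    async def sample_chat_completions() -> None:
        # get_chat_completions_client() resolves the default Serverless connection
        # and returns an azure.ai.inference.aio.ChatCompletionsClient
        chat_client = await ai_client.inference.get_chat_completions_client()
        response = await chat_client.complete(
            messages=[{"role": "user", "content": "How many feet are in a mile?"}]
        )
        print(response.choices[0].message.content)
        await chat_client.close()

    asyncio.run(sample_chat_completions())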
-
-    @distributed_trace_async
-    async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
-        """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
-        The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
-
-        :return: An authenticated embeddings client
-        :rtype: ~azure.ai.inference.aio.EmbeddingsClient
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = await self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
-
-        try:
-            from azure.ai.inference.aio import EmbeddingsClient
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError(
-                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
-            )
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
-            )
-            from azure.core.credentials import AzureKeyCredential
-
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
-        elif connection.authentication_type == AuthenticationType.AAD:
-            # MaaS models do not yet support EntraID auth
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
-            )
-            client = EmbeddingsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            # TODO - Not yet supported by the service. Expected 9/27.
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
-            )
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
-
-    @distributed_trace_async
-    async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
-        """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default
-        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
-
-        :return: An authenticated AsyncAzureOpenAI client
-        :rtype: ~openai.AsyncAzureOpenAI
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = await self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No Azure OpenAI connection found.")
-
-        try:
-            from openai import AsyncAzureOpenAI
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'")
-
-        # Pick latest GA version from the "Data plane - Inference" row in the table
-        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
-        AZURE_OPENAI_API_VERSION = "2024-06-01"
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
-            )
-            client = AsyncAzureOpenAI(
-                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
-            )
-        elif connection.authentication_type == AuthenticationType.AAD:
-            logger.debug(
-                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
-            )
-            try:
-                from azure.identity import get_bearer_token_provider
-            except ModuleNotFoundError as _:
-                raise ModuleNotFoundError(
-                    "azure.identity package not installed. Please install it using 'pip install azure-identity'"
-                )
-            client = AsyncAzureOpenAI(
-                # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
-                azure_ad_token_provider=get_bearer_token_provider(
-                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
-                ),
-                azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
-            try:
-                from azure.identity import get_bearer_token_provider
-            except ModuleNotFoundError as _:
-                raise ModuleNotFoundError(
-                    "azure.identity package not installed. Please install it using 'pip install azure-identity'"
-                )
-            client = AsyncAzureOpenAI(
-                azure_ad_token_provider=get_bearer_token_provider(
-                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
-                ),
-                azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
-            )
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
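A sketch of how the returned client might be used (same assumptions as above: `ai_client` is an `AzureAIClient` exposing this class as `ai_client.inference`, and "gpt-4o" is a placeholder for whatever deployment the Azure OpenAI connection actually hosts):

    import asyncio

    async def sample_azure_openai() -> None:
        # get_azure_openai_client() resolves the default Azure OpenAI connection
        # and returns an openai.AsyncAzureOpenAI
        aoai_client = await ai_client.inference.get_azure_openai_client()
        response = await aoai_client.chat.completions.create(
            model="gpt-4o",  # placeholder deployment name
            messages=[{"role": "user", "content": "How many feet are in a mile?"}],
        )
        print(response.choices[0].message.content)
        await aoai_client.close()

    asyncio.run(sample_azure_openai())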
-
-
-class ConnectionsOperations(ConnectionsOperationsGenerated):
-
-    @distributed_trace_async
-    async def get_default(
-        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
-    ) -> Optional[ConnectionProperties]:
-        """Get the properties of the default connection of a certain connection type, with or without
-        populating authentication credentials.
-
-        :param connection_type: The connection type. Required.
-        :type connection_type: ~azure.ai.client.models._models.ConnectionType
-        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
-        :type with_credentials: bool
-        :return: The connection properties, or `None` if no connection of the given type exists.
-        :rtype: ~azure.ai.client.models._models.ConnectionProperties
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        if not connection_type:
-            raise ValueError("You must specify a connection type")
-        # Since there is no notion of a default connection at the moment, list all connections in the category
-        # and return the first one
-        connection_properties_list = await self.list(connection_type=connection_type, **kwargs)
-        if len(connection_properties_list) > 0:
-            if with_credentials:
-                return await self.get(
-                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
-                )
-            else:
-                return connection_properties_list[0]
-        else:
-            return None
-
-    @distributed_trace_async
-    async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
-        """Get the properties of a single connection, given its connection name, with or without
-        populating authentication credentials.
-
-        :param connection_name: Connection Name. Required.
-        :type connection_name: str
-        :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
-        :type with_credentials: bool
-        :return: The connection properties
-        :rtype: ~azure.ai.client.models._models.ConnectionProperties
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        if not connection_name:
-            raise ValueError("Connection name cannot be empty")
-        if with_credentials:
-            connection: ConnectionsListSecretsResponse = await self._list_secrets(
-                connection_name=connection_name, ignored="ignore", **kwargs
-            )
-            if connection.properties.auth_type == AuthenticationType.AAD:
-                return ConnectionProperties(connection=connection, token_credential=self._config.credential)
-            elif connection.properties.auth_type == AuthenticationType.SAS:
-                from ...models._patch import SASTokenCredential
-
-                token_credential = SASTokenCredential(
-                    sas_token=connection.properties.credentials.sas,
-                    credential=self._config.credential,
-                    subscription_id=self._config.subscription_id,
-                    resource_group_name=self._config.resource_group_name,
-                    project_name=self._config.project_name,
-                    connection_name=connection_name,
-                )
-                return ConnectionProperties(connection=connection, token_credential=token_credential)
-
-            return ConnectionProperties(connection=connection)
-        else:
-            return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs))
-
-    @distributed_trace_async
-    async def list(
-        self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any
-    ) -> Iterable[ConnectionProperties]:
-        """List the properties of all connections, or all connections of a certain connection type.
-
-        :param connection_type: The connection type. Optional. If provided, this method lists connections of this type.
-         If not provided, all connections are listed.
- :type connection_type: ~azure.ai.client.models._models.ConnectionType - :return: A list of connection properties - :rtype: Iterable[~azure.ai.client.models._models.ConnectionProperties] - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connections_list: ConnectionsListResponse = await self._list( - include_all=True, category=connection_type, **kwargs - ) - - # Iterate to create the simplified result property - connection_properties_list: List[ConnectionProperties] = [] - for connection in connections_list.value: - connection_properties_list.append(ConnectionProperties(connection=connection)) - - return connection_properties_list - - -class AgentsOperations(AgentsOperationsGenerated): - - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.client.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. 
- :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. - :return: An Agent object. - :raises: HttpResponseError for HTTP errors. - """ - if body is not _Unset: - if isinstance(body, io.IOBase): - return await super().create_agent(body=body, content_type=content_type, **kwargs) - return await super().create_agent(body=body, **kwargs) - - if toolset is not None: - self._toolset = toolset - tools = toolset.definitions - tool_resources = toolset.resources - - return await super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def get_toolset(self) -> Optional[_models.AsyncToolSet]: - """ - Get the toolset for the agent. - - :return: The toolset for the agent. If not set, returns None. 
- :rtype: ~azure.ai.client.models.AsyncToolSet - """ - if hasattr(self, "_toolset"): - return self._toolset - return None - - @overload - async def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. 
The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. 
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
-
-        else:
-            raise ValueError("Invalid combination of arguments provided.")
-
-        # The generated overloads return a coroutine; await it here so the caller gets the ThreadRun directly
-        return await response
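A sketch of the intended round trip through these overloads, using `create_and_process_run` (defined next) to poll the run to completion. This is illustrative only: `ai_client.agents` and the generated `create_thread`/`create_message` helpers are assumptions based on the rest of this package, and the model name is a placeholder:

    import asyncio

    async def sample_agent_run() -> None:
        agent = await ai_client.agents.create_agent(
            model="gpt-4o", name="my-agent", instructions="You are a helpful agent"
        )
        thread = await ai_client.agents.create_thread()
        await ai_client.agents.create_message(
            thread_id=thread.id, role="user", content="How many feet are in a mile?"
        )
        # Polls get_run() and submits tool outputs until a terminal status is reached
        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
        print(f"Run finished with status: {run.status}")

    asyncio.run(sample_agent_run())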
- :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.client.models.AgentsApiResponseFormatMode or - ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype sleep_interval: int - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
-        :rtype: ~azure.ai.client.models.ThreadRun
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        # Create and initiate the run with additional parameters
-        run = await self.create_run(
-            thread_id=thread_id,
-            assistant_id=assistant_id,
-            model=model,
-            instructions=instructions,
-            additional_instructions=additional_instructions,
-            additional_messages=additional_messages,
-            tools=tools,
-            temperature=temperature,
-            top_p=top_p,
-            max_prompt_tokens=max_prompt_tokens,
-            max_completion_tokens=max_completion_tokens,
-            truncation_strategy=truncation_strategy,
-            tool_choice=tool_choice,
-            response_format=response_format,
-            metadata=metadata,
-            **kwargs,
-        )
-
-        # Monitor and process the run status without blocking the event loop
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            await asyncio.sleep(sleep_interval)
-            run = await self.get_run(thread_id=thread_id, run_id=run.id)
-
-            if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
-                tool_calls = run.required_action.submit_tool_outputs.tool_calls
-                if not tool_calls:
-                    logger.warning("No tool calls provided - cancelling run")
-                    await self.cancel_run(thread_id=thread_id, run_id=run.id)
-                    break
-
-                toolset = self.get_toolset()
-                if toolset:
-                    tool_outputs = await toolset.execute_tool_calls(tool_calls)
-                else:
-                    raise ValueError("Toolset is not available in the client.")
-
-                logger.info("Tool outputs: %s", tool_outputs)
-                if tool_outputs:
-                    await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
-
-            logger.info("Current run status: %s", run.status)
-
-        return run
-
-    @overload
-    async def create_stream(
-        self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.AsyncAgentRunStream:
-        """Creates a new stream for an agent thread, terminating when the run enters a terminal state with a ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_stream(
-        self,
-        thread_id: str,
-        *,
-        assistant_id: str,
-        content_type: str = "application/json",
-        model: Optional[str] = None,
-        instructions: Optional[str] = None,
-        additional_instructions: Optional[str] = None,
-        additional_messages: Optional[List[_models.ThreadMessage]] = None,
-        tools: Optional[List[_models.ToolDefinition]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        max_prompt_tokens: Optional[int] = None,
-        max_completion_tokens: Optional[int] = None,
-        truncation_strategy: Optional[_models.TruncationObject] = None,
-        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
-        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
-        **kwargs: Any,
-    ) -> _models.AsyncAgentRunStream:
-        """Creates a new stream for an agent thread.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :keyword assistant_id: The ID of the agent that should run the thread. Required.
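
A caller-side sketch of the polling helper above (the ``agents_client`` handle and the IDs are illustrative assumptions, not defined in this patch):

```python
async def run_to_completion(agents_client, thread_id: str, agent_id: str) -> None:
    # create_and_process_run polls get_run() until the run leaves the
    # queued/in_progress/requires_action states, executing toolset tool
    # calls and submitting their outputs along the way.
    run = await agents_client.create_and_process_run(
        thread_id=thread_id, assistant_id=agent_id, sleep_interval=2
    )
    print("run finished with status:", run.status)
```
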
-        :paramtype assistant_id: str
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword model: The overridden model name that the agent should use to run the thread. Default
-         value is None.
-        :paramtype model: str
-        :keyword instructions: The overridden system instructions that the agent should use to run the
-         thread. Default value is None.
-        :paramtype instructions: str
-        :keyword additional_instructions: Additional instructions to append at the end of the
-         instructions for the run. This is useful for modifying the behavior
-         on a per-run basis without overriding other instructions. Default value is None.
-        :paramtype additional_instructions: str
-        :keyword additional_messages: Adds additional messages to the thread before creating the run.
-         Default value is None.
-        :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
-        :keyword tools: The overridden list of enabled tools that the agent should use to run the
-         thread. Default value is None.
-        :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
-        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
-         will make the output
-         more random, while lower values like 0.2 will make it more focused and deterministic. Default
-         value is None.
-        :paramtype temperature: float
-        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
-         model
-         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
-         comprising the top 10% probability mass are considered.
-
-         We generally recommend altering this or temperature but not both. Default value is None.
-        :paramtype top_p: float
-        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
-         course of the run. The run will make a best effort to use only
-         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
-         the number of prompt tokens specified,
-         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
-         value is None.
-        :paramtype max_prompt_tokens: int
-        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
-         the course of the run. The run will make a best effort
-         to use only the number of completion tokens specified, across multiple turns of the run. If
-         the run exceeds the number of
-         completion tokens specified, the run will end with status ``incomplete``. See
-         ``incomplete_details`` for more info. Default value is None.
-        :paramtype max_completion_tokens: int
-        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
-         moves forward. Default value is None.
-        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
-        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
-         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
-         AgentsNamedToolChoice Default value is None.
-        :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
-         ~azure.ai.client.models.AgentsNamedToolChoice
-        :keyword response_format: Specifies the format that the model must output. Is one of the
-         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
-         AgentsApiResponseFormat Default value is None.
-        :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
-         or ~azure.ai.client.models.AgentsApiResponseFormat
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword event_handler: The event handler to use for processing events during the run. Default
-         value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_stream(
-        self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.AsyncAgentRunStream:
-        """Creates a new stream for an agent thread, terminating when the run enters a terminal state with a ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def create_stream(
-        self,
-        thread_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        assistant_id: str = _Unset,
-        model: Optional[str] = None,
-        instructions: Optional[str] = None,
-        additional_instructions: Optional[str] = None,
-        additional_messages: Optional[List[_models.ThreadMessage]] = None,
-        tools: Optional[List[_models.ToolDefinition]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        max_prompt_tokens: Optional[int] = None,
-        max_completion_tokens: Optional[int] = None,
-        truncation_strategy: Optional[_models.TruncationObject] = None,
-        tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
-        response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
-        **kwargs: Any,
-    ) -> _models.AsyncAgentRunStream:
-        """Creates a new stream for an agent thread, terminating when the run enters a terminal state with a ``data: [DONE]`` message.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword assistant_id: The ID of the agent that should run the thread. Required.
-        :paramtype assistant_id: str
-        :keyword model: The overridden model name that the agent should use to run the thread. Default
-         value is None.
-        :paramtype model: str
-        :keyword instructions: The overridden system instructions that the agent should use to run the
-         thread. Default value is None.
-        :paramtype instructions: str
-        :keyword additional_instructions: Additional instructions to append at the end of the
-         instructions for the run. This is useful for modifying the behavior
-         on a per-run basis without overriding other instructions. Default value is None.
-        :paramtype additional_instructions: str
-        :keyword additional_messages: Adds additional messages to the thread before creating the run.
-         Default value is None.
-        :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
-        :keyword tools: The overridden list of enabled tools that the agent should use to run the
-         thread. Default value is None.
-        :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
-        :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
-         will make the output
-         more random, while lower values like 0.2 will make it more focused and deterministic. Default
-         value is None.
-        :paramtype temperature: float
-        :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
-         model
-         considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
-         comprising the top 10% probability mass are considered.
-
-         We generally recommend altering this or temperature but not both. Default value is None.
-        :paramtype top_p: float
-        :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
-         course of the run. The run will make a best effort to use only
-         the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
-         the number of prompt tokens specified,
-         the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
-         value is None.
-        :paramtype max_prompt_tokens: int
-        :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
-         the course of the run. The run will make a best effort
-         to use only the number of completion tokens specified, across multiple turns of the run. If
-         the run exceeds the number of
-         completion tokens specified, the run will end with status ``incomplete``. See
-         ``incomplete_details`` for more info. Default value is None.
-        :paramtype max_completion_tokens: int
-        :keyword truncation_strategy: The strategy to use for dropping messages as the context window
-         moves forward. Default value is None.
-        :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
-        :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
-         the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
-         AgentsNamedToolChoice Default value is None.
-        :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
-         ~azure.ai.client.models.AgentsNamedToolChoice
-        :keyword response_format: Specifies the format that the model must output. Is one of the
-         following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
-         AgentsApiResponseFormat Default value is None.
-        :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
-         or ~azure.ai.client.models.AgentsApiResponseFormat
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword event_handler: The event handler to use for processing events during the run. Default
-         value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.ThreadRun
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def submit_tool_outputs_to_run(
-        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.ThreadRun:
-        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
-         outputs will have a status of 'requires_action' with a required_action.type of
-         'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.ThreadRun
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def submit_tool_outputs_to_run(
-        self,
-        thread_id: str,
-        run_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        tool_outputs: List[_models.ToolOutput] = _Unset,
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
-        **kwargs: Any,
-    ) -> _models.ThreadRun:
-        """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
-         outputs will have a status of 'requires_action' with a required_action.type of
-         'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword tool_outputs: Required.
-        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
-        :keyword event_handler: The event handler to use for processing events during the run. Default
-         value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
-        :return: ThreadRun. The ThreadRun is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.ThreadRun
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if isinstance(body, dict):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        elif tool_outputs is not _Unset:
-            response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
-            )
-
-        elif isinstance(body, io.IOBase):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        else:
-            raise ValueError("Invalid combination of arguments provided.")
-
-        # This variant does not stream; await the service response and return the run
-        return await response
-
-    @overload
-    async def submit_tool_outputs_to_stream(
-        self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.AsyncAgentRunStream:
-        """Submits outputs from tools as requested by tool calls in a stream, terminating when the run
-         enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool outputs
-         will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Required.
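
For the non-streaming variant above, a minimal sketch of answering a run paused in ``requires_action`` (the ``agents_client`` handle and ``run`` object are assumed; dict payloads are used on the assumption that this package's models are MutableMapping-compatible, as the docstrings state):

```python
async def answer_tool_calls(agents_client, thread_id: str, run) -> None:
    # One output per requested tool call; "<result>" stands in for real
    # tool execution (compare toolset.execute_tool_calls above).
    calls = run.required_action.submit_tool_outputs.tool_calls
    outputs = [{"tool_call_id": call.id, "output": "<result>"} for call in calls]
    await agents_client.submit_tool_outputs_to_run(
        thread_id=thread_id, run_id=run.id, tool_outputs=outputs
    )
```
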
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def submit_tool_outputs_to_stream(
-        self,
-        thread_id: str,
-        run_id: str,
-        *,
-        tool_outputs: List[_models.ToolOutput],
-        content_type: str = "application/json",
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
-        **kwargs: Any,
-    ) -> _models.AsyncAgentRunStream:
-        """Submits outputs from tools as requested by tool calls in a stream, terminating when the run
-         enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool outputs
-         will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :keyword tool_outputs: Required.
-        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword event_handler: The event handler to use for processing events during the run. Default
-         value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def submit_tool_outputs_to_stream(
-        self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.AsyncAgentRunStream:
-        """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
-         outputs will have a status of 'requires_action' with a required_action.type of
-         'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def submit_tool_outputs_to_stream(
-        self,
-        thread_id: str,
-        run_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        tool_outputs: List[_models.ToolOutput] = _Unset,
-        event_handler: Optional[_models.AsyncAgentEventHandler] = None,
-        **kwargs: Any,
-    ) -> _models.AsyncAgentRunStream:
-        """Submits outputs from tools as requested by tool calls in a stream, terminating when the run
-         enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool outputs
-         will have a status of 'requires_action' with a required_action.type of 'submit_tool_outputs'.
-
-        :param thread_id: Required.
-        :type thread_id: str
-        :param run_id: Required.
-        :type run_id: str
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword tool_outputs: Required.
-        :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
-        :keyword event_handler: The event handler to use for processing events during the run. Default
-         value is None.
-        :paramtype event_handler: ~azure.ai.client.models.AsyncAgentEventHandler
-        :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
-        :rtype: ~azure.ai.client.models.AsyncAgentRunStream
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if isinstance(body, dict):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        elif tool_outputs is not _Unset:
-            response = super().submit_tool_outputs_to_run(
-                thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
-            )
-
-        elif isinstance(body, io.IOBase):
-            content_type = kwargs.get("content_type", "application/json")
-            response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
-        else:
-            raise ValueError("Invalid combination of arguments provided.")
-
-        # Cast the response to AsyncIterator[bytes] for type correctness
-        response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response)
-
-        return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
-
-    async def _handle_submit_tool_outputs(
-        self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None
-    ) -> None:
-        if isinstance(run.required_action, _models.SubmitToolOutputsAction):
-            tool_calls = run.required_action.submit_tool_outputs.tool_calls
-            if not tool_calls:
-                logger.debug("No tool calls to execute.")
-                return
-
-            toolset = self.get_toolset()
-            if toolset:
-                tool_outputs = await toolset.execute_tool_calls(tool_calls)
-            else:
-                logger.warning("Toolset is not available in the client.")
-                return
-
-            logger.info("Tool outputs: %s", tool_outputs)
-            if tool_outputs:
-                async with await self.submit_tool_outputs_to_stream(
-                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
-                ) as stream:
-                    await stream.until_done()
-
-    @overload
-    async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
-        """Uploads a file for use by other operations.
-
-        :param body: Required.
-        :type body: JSON
-        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.OpenAIFile
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def upload_file(
-        self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
-    ) -> _models.OpenAIFile:
-        """Uploads a file for use by other operations.
-
-        :keyword file: Required.
-        :paramtype file: ~azure.ai.client._vendor.FileType
-        :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
-         "assistants_output", "batch", "batch_output", and "vision". Required.
-        :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
-        :keyword filename: Default value is None.
-        :paramtype filename: str
-        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.OpenAIFile
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def upload_file(
-        self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any
-    ) -> _models.OpenAIFile:
-        """Uploads a file for use by other operations.
-
-        :param file_path: Required.
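
Mirroring how ``_handle_submit_tool_outputs`` consumes a stream above, a sketch of driving a streamed run end to end (the ``agents_client`` handle and IDs are illustrative assumptions):

```python
async def stream_run(agents_client, thread_id: str, agent_id: str) -> None:
    # AsyncAgentRunStream is an async context manager; until_done() consumes
    # server-sent events until the terminal ``data: [DONE]`` message arrives.
    async with await agents_client.create_stream(
        thread_id=thread_id, assistant_id=agent_id
    ) as stream:
        await stream.until_done()
```
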
- :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - return await super().upload_file(body=body, **kwargs) - - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - if file is not None and purpose is not None: - return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # Determine filename and create correct FileType - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return await super().upload_file(file=file_content, purpose=purpose, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. 
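
The tuple form below mirrors what the ``file_path`` branch of ``upload_file`` constructs internally before delegating; the client handle and file name are assumptions for illustration:

```python
async def upload_from_memory(agents_client):
    # FileType accepts a (filename, content) pair, as built by the
    # file_path branch of upload_file above.
    return await agents_client.upload_file(
        file=("notes.txt", b"hello world"), purpose="assistants"
    )
```
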
- :paramtype file: ~azure.ai.client._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = await self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
-        )
-
-        while uploaded_file.status in ["uploaded", "pending", "running"]:
-            await asyncio.sleep(sleep_interval)
-            uploaded_file = await self.get_file(uploaded_file.id)
-
-        return uploaded_file
-
-    @overload
-    async def create_vector_store_and_poll(
-        self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_and_poll(
-        self,
-        *,
-        content_type: str = "application/json",
-        file_ids: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
-         ``file_search`` that can access files. Default value is None.
-        :paramtype file_ids: list[str]
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_and_poll(
-        self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
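
A short sketch of the polling uploader above (the ``agents_client`` handle and path are illustrative assumptions):

```python
async def upload_and_wait(agents_client, path: str):
    # Returns once the file leaves the uploaded/pending/running states.
    return await agents_client.upload_file_and_poll(
        file_path=path, purpose="assistants", sleep_interval=1
    )
```
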
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def create_vector_store_and_poll(
-        self,
-        body: Union[JSON, IO[bytes], None] = None,
-        *,
-        content_type: str = "application/json",
-        file_ids: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
-        :type body: JSON or IO[bytes]
-        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
-         ``file_search`` that can access files. Default value is None.
-        :paramtype file_ids: list[str]
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is not None:
-            vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs)
-        elif file_ids is not None or (name is not None and expires_after is not None):
-            vector_store = await self.create_vector_store(
-                content_type=content_type,
-                file_ids=file_ids,
-                name=name,
-                expires_after=expires_after,
-                chunking_strategy=chunking_strategy,
-                metadata=metadata,
-                **kwargs,
-            )
-        else:
-            raise ValueError(
-                "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
-                "'file_ids', or 'name' and 'expires_after'."
-            )
-
-        while vector_store.status == "in_progress":
-            await asyncio.sleep(sleep_interval)
-            vector_store = await self.get_vector_store(vector_store.id)
-
-        return vector_store
-
-    @overload
-    async def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: JSON,
-        *,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - *, - file_ids: List[str], - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: IO[bytes], - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = None, - *, - file_ids: List[str] = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. 
-        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is None:
-            vector_store_file_batch = await super().create_vector_store_file_batch(
-                vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
-            )
-        else:
-            content_type = kwargs.get("content_type", "application/json")
-            vector_store_file_batch = await super().create_vector_store_file_batch(
-                body=body, content_type=content_type, **kwargs
-            )
-
-        while vector_store_file_batch.status == "in_progress":
-            await asyncio.sleep(sleep_interval)
-            vector_store_file_batch = await super().get_vector_store_file_batch(
-                vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
-            )
-
-        return vector_store_file_batch
-
-
-__all__: List[str] = [
-    "AgentsOperations",
-    "ConnectionsOperations",
-    "InferenceOperations",
-] # Add all objects you want publicly available to users at this package level
-
-
-def patch_sdk():
-    """Do not remove from this file.
-
-    `patch_sdk` is a last resort escape hatch that allows you to do customizations
-    you can't accomplish using the techniques described in
-    https://aka.ms/azsdk/python/dpcodegen/python/customize
-    """
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py
deleted file mode 100644
index 9d07edc2486e..000000000000
--- a/sdk/ai/azure-ai-client/azure/ai/client/models/__init__.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# -------------------------------------------------------------------------- - -from ._models import Agent -from ._models import AgentDeletionStatus -from ._models import AgentThread -from ._models import AgentThreadCreationOptions -from ._models import AgentsApiResponseFormat -from ._models import AgentsNamedToolChoice -from ._models import AppInsightsConfiguration -from ._models import AzureAISearchResource -from ._models import AzureAISearchToolDefinition -from ._models import BingGroundingToolDefinition -from ._models import CodeInterpreterToolDefinition -from ._models import CodeInterpreterToolResource -from ._models import ConnectionListResource -from ._models import ConnectionResource -from ._models import CronTrigger -from ._models import Dataset -from ._models import Evaluation -from ._models import EvaluationSchedule -from ._models import EvaluatorConfiguration -from ._models import FileContentResponse -from ._models import FileDeletionStatus -from ._models import FileListResponse -from ._models import FileSearchToolDefinition -from ._models import FileSearchToolDefinitionDetails -from ._models import FileSearchToolResource -from ._models import FunctionDefinition -from ._models import FunctionName -from ._models import FunctionToolDefinition -from ._models import IndexResource -from ._models import InputData -from ._models import MessageAttachment -from ._models import MessageContent -from ._models import MessageDelta -from ._models import MessageDeltaChunk -from ._models import MessageDeltaContent -from ._models import MessageDeltaImageFileContent -from ._models import MessageDeltaImageFileContentObject -from ._models import MessageDeltaTextAnnotation -from ._models import MessageDeltaTextContent -from ._models import MessageDeltaTextContentObject -from ._models import MessageDeltaTextFileCitationAnnotation -from ._models import MessageDeltaTextFileCitationAnnotationObject -from ._models import MessageDeltaTextFilePathAnnotation -from ._models import MessageDeltaTextFilePathAnnotationObject -from ._models import MessageImageFileContent -from ._models import MessageImageFileDetails -from ._models import MessageIncompleteDetails -from ._models import MessageTextAnnotation -from ._models import MessageTextContent -from ._models import MessageTextDetails -from ._models import MessageTextFileCitationAnnotation -from ._models import MessageTextFileCitationDetails -from ._models import MessageTextFilePathAnnotation -from ._models import MessageTextFilePathDetails -from ._models import MicrosoftFabricToolDefinition -from ._models import OpenAIFile -from ._models import OpenAIPageableListOfAgent -from ._models import OpenAIPageableListOfRunStep -from ._models import OpenAIPageableListOfThreadMessage -from ._models import OpenAIPageableListOfThreadRun -from ._models import OpenAIPageableListOfVectorStore -from ._models import OpenAIPageableListOfVectorStoreFile -from ._models import RecurrenceSchedule -from ._models import RecurrenceTrigger -from ._models import RequiredAction -from ._models import RequiredFunctionToolCall -from ._models import RequiredFunctionToolCallDetails -from ._models import RequiredToolCall -from ._models import RunCompletionUsage -from ._models import RunError -from ._models import RunStep -from ._models import RunStepAzureAISearchToolCall -from ._models import RunStepBingGroundingToolCall -from ._models import RunStepCodeInterpreterImageOutput -from ._models import RunStepCodeInterpreterImageReference -from ._models import RunStepCodeInterpreterLogOutput -from 
._models import RunStepCodeInterpreterToolCall -from ._models import RunStepCodeInterpreterToolCallDetails -from ._models import RunStepCodeInterpreterToolCallOutput -from ._models import RunStepCompletionUsage -from ._models import RunStepDelta -from ._models import RunStepDeltaChunk -from ._models import RunStepDeltaCodeInterpreterDetailItemObject -from ._models import RunStepDeltaCodeInterpreterImageOutput -from ._models import RunStepDeltaCodeInterpreterImageOutputObject -from ._models import RunStepDeltaCodeInterpreterLogOutput -from ._models import RunStepDeltaCodeInterpreterOutput -from ._models import RunStepDeltaCodeInterpreterToolCall -from ._models import RunStepDeltaDetail -from ._models import RunStepDeltaFileSearchToolCall -from ._models import RunStepDeltaFunction -from ._models import RunStepDeltaFunctionToolCall -from ._models import RunStepDeltaMessageCreation -from ._models import RunStepDeltaMessageCreationObject -from ._models import RunStepDeltaToolCall -from ._models import RunStepDeltaToolCallObject -from ._models import RunStepDetails -from ._models import RunStepError -from ._models import RunStepFileSearchToolCall -from ._models import RunStepFunctionToolCall -from ._models import RunStepFunctionToolCallDetails -from ._models import RunStepMessageCreationDetails -from ._models import RunStepMessageCreationReference -from ._models import RunStepMicrosoftFabricToolCall -from ._models import RunStepSharepointToolCall -from ._models import RunStepToolCall -from ._models import RunStepToolCallDetails -from ._models import SamplingStrategy -from ._models import SharepointToolDefinition -from ._models import SubmitToolOutputsAction -from ._models import SubmitToolOutputsDetails -from ._models import SystemData -from ._models import ThreadDeletionStatus -from ._models import ThreadMessage -from ._models import ThreadMessageOptions -from ._models import ThreadRun -from ._models import ToolDefinition -from ._models import ToolOutput -from ._models import ToolResources -from ._models import Trigger -from ._models import TruncationObject -from ._models import UpdateCodeInterpreterToolResourceOptions -from ._models import UpdateFileSearchToolResourceOptions -from ._models import UpdateToolResourcesOptions -from ._models import VectorStore -from ._models import VectorStoreAutoChunkingStrategyRequest -from ._models import VectorStoreAutoChunkingStrategyResponse -from ._models import VectorStoreChunkingStrategyRequest -from ._models import VectorStoreChunkingStrategyResponse -from ._models import VectorStoreDeletionStatus -from ._models import VectorStoreExpirationPolicy -from ._models import VectorStoreFile -from ._models import VectorStoreFileBatch -from ._models import VectorStoreFileCount -from ._models import VectorStoreFileDeletionStatus -from ._models import VectorStoreFileError -from ._models import VectorStoreStaticChunkingStrategyOptions -from ._models import VectorStoreStaticChunkingStrategyRequest -from ._models import VectorStoreStaticChunkingStrategyResponse - -from ._enums import AgentStreamEvent -from ._enums import AgentsApiResponseFormatMode -from ._enums import AgentsApiToolChoiceOptionMode -from ._enums import AgentsNamedToolChoiceType -from ._enums import ApiResponseFormat -from ._enums import AuthenticationType -from ._enums import ConnectionType -from ._enums import DoneEvent -from ._enums import ErrorEvent -from ._enums import FilePurpose -from ._enums import FileState -from ._enums import Frequency -from ._enums import IncompleteRunDetails -from ._enums 
import ListSortOrder -from ._enums import MessageIncompleteDetailsReason -from ._enums import MessageRole -from ._enums import MessageStatus -from ._enums import MessageStreamEvent -from ._enums import RunStatus -from ._enums import RunStepErrorCode -from ._enums import RunStepStatus -from ._enums import RunStepStreamEvent -from ._enums import RunStepType -from ._enums import RunStreamEvent -from ._enums import ThreadStreamEvent -from ._enums import TruncationStrategy -from ._enums import VectorStoreChunkingStrategyRequestType -from ._enums import VectorStoreChunkingStrategyResponseType -from ._enums import VectorStoreExpirationPolicyAnchor -from ._enums import VectorStoreFileBatchStatus -from ._enums import VectorStoreFileErrorCode -from ._enums import VectorStoreFileStatus -from ._enums import VectorStoreFileStatusFilter -from ._enums import VectorStoreStatus -from ._enums import WeekDays -from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "Agent", - "AgentDeletionStatus", - "AgentThread", - "AgentThreadCreationOptions", - "AgentsApiResponseFormat", - "AgentsNamedToolChoice", - "AppInsightsConfiguration", - "AzureAISearchResource", - "AzureAISearchToolDefinition", - "BingGroundingToolDefinition", - "CodeInterpreterToolDefinition", - "CodeInterpreterToolResource", - "ConnectionListResource", - "ConnectionResource", - "CronTrigger", - "Dataset", - "Evaluation", - "EvaluationSchedule", - "EvaluatorConfiguration", - "FileContentResponse", - "FileDeletionStatus", - "FileListResponse", - "FileSearchToolDefinition", - "FileSearchToolDefinitionDetails", - "FileSearchToolResource", - "FunctionDefinition", - "FunctionName", - "FunctionToolDefinition", - "IndexResource", - "InputData", - "MessageAttachment", - "MessageContent", - "MessageDelta", - "MessageDeltaChunk", - "MessageDeltaContent", - "MessageDeltaImageFileContent", - "MessageDeltaImageFileContentObject", - "MessageDeltaTextAnnotation", - "MessageDeltaTextContent", - "MessageDeltaTextContentObject", - "MessageDeltaTextFileCitationAnnotation", - "MessageDeltaTextFileCitationAnnotationObject", - "MessageDeltaTextFilePathAnnotation", - "MessageDeltaTextFilePathAnnotationObject", - "MessageImageFileContent", - "MessageImageFileDetails", - "MessageIncompleteDetails", - "MessageTextAnnotation", - "MessageTextContent", - "MessageTextDetails", - "MessageTextFileCitationAnnotation", - "MessageTextFileCitationDetails", - "MessageTextFilePathAnnotation", - "MessageTextFilePathDetails", - "MicrosoftFabricToolDefinition", - "OpenAIFile", - "OpenAIPageableListOfAgent", - "OpenAIPageableListOfRunStep", - "OpenAIPageableListOfThreadMessage", - "OpenAIPageableListOfThreadRun", - "OpenAIPageableListOfVectorStore", - "OpenAIPageableListOfVectorStoreFile", - "RecurrenceSchedule", - "RecurrenceTrigger", - "RequiredAction", - "RequiredFunctionToolCall", - "RequiredFunctionToolCallDetails", - "RequiredToolCall", - "RunCompletionUsage", - "RunError", - "RunStep", - "RunStepAzureAISearchToolCall", - "RunStepBingGroundingToolCall", - "RunStepCodeInterpreterImageOutput", - "RunStepCodeInterpreterImageReference", - "RunStepCodeInterpreterLogOutput", - "RunStepCodeInterpreterToolCall", - "RunStepCodeInterpreterToolCallDetails", - "RunStepCodeInterpreterToolCallOutput", - "RunStepCompletionUsage", - "RunStepDelta", - "RunStepDeltaChunk", - "RunStepDeltaCodeInterpreterDetailItemObject", - "RunStepDeltaCodeInterpreterImageOutput", - 
"RunStepDeltaCodeInterpreterImageOutputObject", - "RunStepDeltaCodeInterpreterLogOutput", - "RunStepDeltaCodeInterpreterOutput", - "RunStepDeltaCodeInterpreterToolCall", - "RunStepDeltaDetail", - "RunStepDeltaFileSearchToolCall", - "RunStepDeltaFunction", - "RunStepDeltaFunctionToolCall", - "RunStepDeltaMessageCreation", - "RunStepDeltaMessageCreationObject", - "RunStepDeltaToolCall", - "RunStepDeltaToolCallObject", - "RunStepDetails", - "RunStepError", - "RunStepFileSearchToolCall", - "RunStepFunctionToolCall", - "RunStepFunctionToolCallDetails", - "RunStepMessageCreationDetails", - "RunStepMessageCreationReference", - "RunStepMicrosoftFabricToolCall", - "RunStepSharepointToolCall", - "RunStepToolCall", - "RunStepToolCallDetails", - "SamplingStrategy", - "SharepointToolDefinition", - "SubmitToolOutputsAction", - "SubmitToolOutputsDetails", - "SystemData", - "ThreadDeletionStatus", - "ThreadMessage", - "ThreadMessageOptions", - "ThreadRun", - "ToolDefinition", - "ToolOutput", - "ToolResources", - "Trigger", - "TruncationObject", - "UpdateCodeInterpreterToolResourceOptions", - "UpdateFileSearchToolResourceOptions", - "UpdateToolResourcesOptions", - "VectorStore", - "VectorStoreAutoChunkingStrategyRequest", - "VectorStoreAutoChunkingStrategyResponse", - "VectorStoreChunkingStrategyRequest", - "VectorStoreChunkingStrategyResponse", - "VectorStoreDeletionStatus", - "VectorStoreExpirationPolicy", - "VectorStoreFile", - "VectorStoreFileBatch", - "VectorStoreFileCount", - "VectorStoreFileDeletionStatus", - "VectorStoreFileError", - "VectorStoreStaticChunkingStrategyOptions", - "VectorStoreStaticChunkingStrategyRequest", - "VectorStoreStaticChunkingStrategyResponse", - "AgentStreamEvent", - "AgentsApiResponseFormatMode", - "AgentsApiToolChoiceOptionMode", - "AgentsNamedToolChoiceType", - "ApiResponseFormat", - "AuthenticationType", - "ConnectionType", - "DoneEvent", - "ErrorEvent", - "FilePurpose", - "FileState", - "Frequency", - "IncompleteRunDetails", - "ListSortOrder", - "MessageIncompleteDetailsReason", - "MessageRole", - "MessageStatus", - "MessageStreamEvent", - "RunStatus", - "RunStepErrorCode", - "RunStepStatus", - "RunStepStreamEvent", - "RunStepType", - "RunStreamEvent", - "ThreadStreamEvent", - "TruncationStrategy", - "VectorStoreChunkingStrategyRequestType", - "VectorStoreChunkingStrategyResponseType", - "VectorStoreExpirationPolicyAnchor", - "VectorStoreFileBatchStatus", - "VectorStoreFileErrorCode", - "VectorStoreFileStatus", - "VectorStoreFileStatusFilter", - "VectorStoreStatus", - "WeekDays", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py deleted file mode 100644 index 7ca731b7639b..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_enums.py +++ /dev/null @@ -1,513 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from enum import Enum -from azure.core import CaseInsensitiveEnumMeta - - -class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Represents the mode in which the model will handle the return format of a tool call.""" - - AUTO = "auto" - """Default value. Let the model handle the return format.""" - NONE = "none" - """Setting the value to ``none`` will result in a 400 Bad Request.""" - - -class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies how the tool choice will be used.""" - - NONE = "none" - """The model will not call a function and instead generates a message.""" - AUTO = "auto" - """The model can pick between generating a message or calling a function.""" - - -class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Available tool types for agents named tools.""" - - FUNCTION = "function" - """Tool type ``function``""" - CODE_INTERPRETER = "code_interpreter" - """Tool type ``code_interpreter``""" - FILE_SEARCH = "file_search" - """Tool type ``file_search``""" - BING_GROUNDING = "bing_grounding" - """Tool type ``bing_grounding``""" - MICROSOFT_FABRIC = "microsoft_fabric" - """Tool type ``microsoft_fabric``""" - SHAREPOINT = "sharepoint" - """Tool type ``sharepoint``""" - AZURE_AI_SEARCH = "azure_ai_search" - """Tool type ``azure_ai_search``""" - - -class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Each event in a server-sent events stream has an ``event`` and ``data`` property: - - .. code-block:: - - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run - is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses - to create a message during a run, we emit a ``thread.message.created`` event, a - ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a - ``thread.message.completed`` event. - - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. - """ - - THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. The data of this event is of type AgentThread""" - THREAD_RUN_CREATED = "thread.run.created" - """Event sent when a new run is created. The data of this event is of type ThreadRun""" - THREAD_RUN_QUEUED = "thread.run.queued" - """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" - THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" - """Event sent when a run moves to ``in_progress`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" - """Event sent when a run moves to ``requires_action`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_COMPLETED = "thread.run.completed" - """Event sent when a run is completed. The data of this event is of type ThreadRun""" - THREAD_RUN_FAILED = "thread.run.failed" - """Event sent when a run fails. The data of this event is of type ThreadRun""" - THREAD_RUN_CANCELLING = "thread.run.cancelling" - """Event sent when a run moves to ``cancelling`` status.
The data of this event is of type - ThreadRun""" - THREAD_RUN_CANCELLED = "thread.run.cancelled" - """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" - THREAD_RUN_EXPIRED = "thread.run.expired" - """Event sent when a run is expired. The data of this event is of type ThreadRun""" - THREAD_RUN_STEP_CREATED = "thread.run.step.created" - """Event sent when a new thread run step is created. The data of this event is of type RunStep""" - THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" - """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type - RunStep""" - THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run step is being streamed. The data of this event is of type - RunStepDeltaChunk""" - THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" - """Event sent when a run step is completed. The data of this event is of type RunStep""" - THREAD_RUN_STEP_FAILED = "thread.run.step.failed" - """Event sent when a run step fails. The data of this event is of type RunStep""" - THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" - """Event sent when a run step is cancelled. The data of this event is of type RunStep""" - THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" - """Event sent when a run step is expired. The data of this event is of type RunStep""" - THREAD_MESSAGE_CREATED = "thread.message.created" - """Event sent when a new message is created. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" - """Event sent when a message moves to ``in_progress`` status. The data of this event is of type - ThreadMessage""" - THREAD_MESSAGE_DELTA = "thread.message.delta" - """Event sent when a message is being streamed. The data of this event is of type - MessageDeltaChunk""" - THREAD_MESSAGE_COMPLETED = "thread.message.completed" - """Event sent when a message is completed. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" - """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" - ERROR = "error" - """Event sent when an error occurs, such as an internal server error or a timeout.""" - DONE = "done" - """Event sent when the stream is done.""" - - -class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible API response formats.""" - - TEXT = "text" - """``text`` format should be used for requests involving any sort of ToolCall.""" - JSON_OBJECT = "json_object" - """Using ``json_object`` format will limit the usage of ToolCall to only functions.""" - - -class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Authentication type used by Azure AI service to connect to another service.""" - - API_KEY = "ApiKey" - """API Key authentication""" - AAD = "AAD" - """Entra ID authentication""" - SAS = "SAS" - """Shared Access Signature (SAS) authentication""" - - -class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The Type (or category) of the connection.""" - - AZURE_OPEN_AI = "AzureOpenAI" - """Azure OpenAI service""" - SERVERLESS = "Serverless" - """Serverless API service""" - AZURE_BLOB_STORAGE = "AzureBlob" - """Azure Blob Storage""" - AI_SERVICES = "AIServices" - """Azure AI Services""" - - -class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Terminal event indicating the successful end of a stream.""" - - DONE = "done" - """Event sent when the stream is done.""" - - -class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Terminal event indicating a server side error while streaming.""" - - ERROR = "error" - """Event sent when an error occurs, such as an internal server error or a timeout.""" - - -class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible values denoting the intended usage of a file.""" - - FINE_TUNE = "fine-tune" - """Indicates a file is used for fine tuning input.""" - FINE_TUNE_RESULTS = "fine-tune-results" - """Indicates a file is used for fine tuning results.""" - AGENTS = "assistants" - """Indicates a file is used as input to agents.""" - AGENTS_OUTPUT = "assistants_output" - """Indicates a file is used as output by agents.""" - BATCH = "batch" - """Indicates a file is used as input to .""" - BATCH_OUTPUT = "batch_output" - """Indicates a file is used as output by a vector store batch operation.""" - VISION = "vision" - """Indicates a file is used as input to a vision operation.""" - - -class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The state of the file.""" - - UPLOADED = "uploaded" - """The file has been uploaded but it's not yet processed. This state is not returned by Azure - OpenAI and exposed only for - compatibility. It can be categorized as an inactive state.""" - PENDING = "pending" - """The operation was created and is not queued to be processed in the future. It can be - categorized as an inactive state.""" - RUNNING = "running" - """The operation has started to be processed. It can be categorized as an active state.""" - PROCESSED = "processed" - """The operation has successfully processed and is ready for consumption. It can be categorized as - a terminal state.""" - ERROR = "error" - """The operation has completed processing with a failure and cannot be further consumed. It can be - categorized as a terminal state.""" - DELETING = "deleting" - """The entity is in the process to be deleted. This state is not returned by Azure OpenAI and - exposed only for compatibility. 
- It can be categorized as an active state.""" - DELETED = "deleted" - """The entity has been deleted but may still be referenced by other entities predating the - deletion. It can be categorized as a - terminal state.""" - - -class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Frequency of the schedule - day, week, month, hour, minute.""" - - MONTH = "Month" - WEEK = "Week" - DAY = "Day" - HOUR = "Hour" - MINUTE = "Minute" - - -class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The reason why the run is incomplete. This will point to which specific token limit was reached - over the course of the run. - """ - - MAX_COMPLETION_TOKENS = "max_completion_tokens" - """Maximum completion tokens exceeded""" - MAX_PROMPT_TOKENS = "max_prompt_tokens" - """Maximum prompt tokens exceeded""" - - -class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The available sorting options when requesting a list of response objects.""" - - ASCENDING = "asc" - """Specifies an ascending sort order.""" - DESCENDING = "desc" - """Specifies a descending sort order.""" - - -class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A set of reasons describing why a message is marked as incomplete.""" - - CONTENT_FILTER = "content_filter" - """The run generating the message was terminated due to content filter flagging.""" - MAX_TOKENS = "max_tokens" - """The run generating the message exhausted available tokens before completion.""" - RUN_CANCELLED = "run_cancelled" - """The run generating the message was cancelled before completion.""" - RUN_FAILED = "run_failed" - """The run generating the message failed.""" - RUN_EXPIRED = "run_expired" - """The run generating the message expired.""" - - -class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible values for roles attributed to messages in a thread.""" - - USER = "user" - """The role representing the end-user.""" - AGENT = "assistant" - """The role representing the agent.""" - - -class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible execution status values for a thread message.""" - - IN_PROGRESS = "in_progress" - """A run is currently creating this message.""" - INCOMPLETE = "incomplete" - """This message is incomplete. See incomplete_details for more information.""" - COMPLETED = "completed" - """This message was successfully completed by a run.""" - - -class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Message operation related streaming events.""" - - THREAD_MESSAGE_CREATED = "thread.message.created" - """Event sent when a new message is created. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" - """Event sent when a message moves to ``in_progress`` status. The data of this event is of type - ThreadMessage""" - THREAD_MESSAGE_DELTA = "thread.message.delta" - """Event sent when a message is being streamed. The data of this event is of type - MessageDeltaChunk""" - THREAD_MESSAGE_COMPLETED = "thread.message.completed" - """Event sent when a message is completed. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" - """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" - - -class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible values for the status of an agent thread run.""" - - QUEUED = "queued" - """Represents a run that is queued to start.""" - IN_PROGRESS = "in_progress" - """Represents a run that is in progress.""" - REQUIRES_ACTION = "requires_action" - """Represents a run that needs another operation, such as tool output submission, to continue.""" - CANCELLING = "cancelling" - """Represents a run that is in the process of cancellation.""" - CANCELLED = "cancelled" - """Represents a run that has been cancelled.""" - FAILED = "failed" - """Represents a run that failed.""" - COMPLETED = "completed" - """Represents a run that successfully completed.""" - EXPIRED = "expired" - """Represents a run that expired before it could otherwise finish.""" - - -class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible error code values attributable to a failed run step.""" - - SERVER_ERROR = "server_error" - """Represents a server error.""" - RATE_LIMIT_EXCEEDED = "rate_limit_exceeded" - """Represents an error indicating configured rate limits were exceeded.""" - - -class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible values for the status of a run step.""" - - IN_PROGRESS = "in_progress" - """Represents a run step still in progress.""" - CANCELLED = "cancelled" - """Represents a run step that was cancelled.""" - FAILED = "failed" - """Represents a run step that failed.""" - COMPLETED = "completed" - """Represents a run step that successfully completed.""" - EXPIRED = "expired" - """Represents a run step that expired before otherwise finishing.""" - - -class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Run step operation related streaming events.""" - - THREAD_RUN_STEP_CREATED = "thread.run.step.created" - """Event sent when a new thread run step is created. The data of this event is of type RunStep""" - THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" - """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type - RunStep""" - THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run step is being streamed. The data of this event is of type - RunStepDeltaChunk""" - THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" - """Event sent when a run step is completed. The data of this event is of type RunStep""" - THREAD_RUN_STEP_FAILED = "thread.run.step.failed" - """Event sent when a run step fails. The data of this event is of type RunStep""" - THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" - """Event sent when a run step is cancelled. The data of this event is of type RunStep""" - THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" - """Event sent when a run step is expired. The data of this event is of type RunStep""" - - -class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible types of run steps.""" - - MESSAGE_CREATION = "message_creation" - """Represents a run step to create a message.""" - TOOL_CALLS = "tool_calls" - """Represents a run step that calls tools.""" - - -class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Run operation related streaming events.""" - - THREAD_RUN_CREATED = "thread.run.created" - """Event sent when a new run is created. 
The data of this event is of type ThreadRun""" - THREAD_RUN_QUEUED = "thread.run.queued" - """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" - THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" - """Event sent when a run moves to ``in_progress`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" - """Event sent when a run moves to ``requires_action`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_COMPLETED = "thread.run.completed" - """Event sent when a run is completed. The data of this event is of type ThreadRun""" - THREAD_RUN_FAILED = "thread.run.failed" - """Event sent when a run fails. The data of this event is of type ThreadRun""" - THREAD_RUN_CANCELLING = "thread.run.cancelling" - """Event sent when a run moves to ``cancelling`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_CANCELLED = "thread.run.cancelled" - """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" - THREAD_RUN_EXPIRED = "thread.run.expired" - """Event sent when a run is expired. The data of this event is of type ThreadRun""" - - -class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Thread operation related streaming events.""" - - THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. The data of this event is of type AgentThread""" - - -class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible truncation strategies for the thread.""" - - AUTO = "auto" - """Default value. Messages in the middle of the thread will be dropped to fit the context length - of the model.""" - LAST_MESSAGES = "last_messages" - """The thread will truncate to the ``lastMessages`` count of recent messages.""" - - -class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of chunking strategy.""" - - AUTO = "auto" - STATIC = "static" - - -class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of chunking strategy.""" - - OTHER = "other" - STATIC = "static" - - -class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Describes the relationship between the days and the expiration of this vector store.""" - - LAST_ACTIVE_AT = "last_active_at" - """The expiration policy is based on the last time the vector store was active.""" - - -class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The status of the vector store file batch.""" - - IN_PROGRESS = "in_progress" - """The vector store is still processing this file batch.""" - COMPLETED = "completed" - """the vector store file batch is ready for use.""" - CANCELLED = "cancelled" - """The vector store file batch was cancelled.""" - FAILED = "failed" - """The vector store file batch failed to process.""" - - -class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Error code variants for vector store file processing.""" - - INTERNAL_ERROR = "internal_error" - """An internal error occurred.""" - FILE_NOT_FOUND = "file_not_found" - """The file was not found.""" - PARSING_ERROR = "parsing_error" - """The file could not be parsed.""" - UNHANDLED_MIME_TYPE = "unhandled_mime_type" - """The file has an unhandled mime type.""" - - -class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Vector store file status.""" - - IN_PROGRESS = 
"in_progress" - """The file is currently being processed.""" - COMPLETED = "completed" - """The file has been successfully processed.""" - FAILED = "failed" - """The file has failed to process.""" - CANCELLED = "cancelled" - """The file was cancelled.""" - - -class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Query parameter filter for vector store file retrieval endpoint.""" - - IN_PROGRESS = "in_progress" - """Retrieve only files that are currently being processed""" - COMPLETED = "completed" - """Retrieve only files that have been successfully processed""" - FAILED = "failed" - """Retrieve only files that have failed to process""" - CANCELLED = "cancelled" - """Retrieve only files that were cancelled""" - - -class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Vector store possible status.""" - - EXPIRED = "expired" - """expired status indicates that this vector store has expired and is no longer available for use.""" - IN_PROGRESS = "in_progress" - """in_progress status indicates that this vector store is still processing files.""" - COMPLETED = "completed" - """completed status indicates that this vector store is ready for use.""" - - -class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """WeekDay of the schedule - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.""" - - MONDAY = "Monday" - TUESDAY = "Tuesday" - WEDNESDAY = "Wednesday" - THURSDAY = "Thursday" - FRIDAY = "Friday" - SATURDAY = "Saturday" - SUNDAY = "Sunday" diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py deleted file mode 100644 index c3e819b61daf..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_models.py +++ /dev/null @@ -1,6104 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload - -from .. import _model_base -from .._model_base import rest_discriminator, rest_field -from ._enums import ( - AuthenticationType, - RunStepType, - VectorStoreChunkingStrategyRequestType, - VectorStoreChunkingStrategyResponseType, -) - -if TYPE_CHECKING: - from .. import _types, models as _models - - -class Agent(_model_base.Model): # pylint: disable=too-many-instance-attributes - """Represents an agent that can call the model and use tools. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always assistant. Required. Default value is - "assistant". - :vartype object: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar name: The name of the agent. Required. - :vartype name: str - :ivar description: The description of the agent. Required. 
- :vartype description: str - :ivar model: The ID of the model to use. Required. - :vartype model: str - :ivar instructions: The system instructions for the agent to use. Required. - :vartype instructions: str - :ivar tools: The collection of tools enabled for the agent. Required. - :vartype tools: list[~azure.ai.client.models.ToolDefinition] - :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are - specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Required. - :vartype tool_resources: ~azure.ai.client.models.ToolResources - :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Required. - :vartype temperature: float - :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Required. - :vartype top_p: float - :ivar response_format: The response format of the tool calls used by this agent. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat - :vartype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode or - ~azure.ai.client.models.AgentsApiResponseFormat - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["assistant"] = rest_field() - """The object type, which is always assistant. Required. Default value is \"assistant\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - name: str = rest_field() - """The name of the agent. Required.""" - description: str = rest_field() - """The description of the agent. Required.""" - model: str = rest_field() - """The ID of the model to use. Required.""" - instructions: str = rest_field() - """The system instructions for the agent to use. Required.""" - tools: List["_models.ToolDefinition"] = rest_field() - """The collection of tools enabled for the agent. Required.""" - tool_resources: "_models.ToolResources" = rest_field() - """A set of resources that are used by the agent's tools. The resources are specific to the type - of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Required.""" - temperature: float = rest_field() - """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, - while lower values like 0.2 will make it more focused and deterministic. 
Required.""" - top_p: float = rest_field() - """An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Required.""" - response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() - """The response format of the tool calls used by this agent. Is one of the following types: str, - Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - name: str, - description: str, - model: str, - instructions: str, - tools: List["_models.ToolDefinition"], - tool_resources: "_models.ToolResources", - temperature: float, - top_p: float, - metadata: Dict[str, str], - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["assistant"] = "assistant" - - -class AgentDeletionStatus(_model_base.Model): - """The status of an agent deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is - "assistant.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["assistant.deleted"] = rest_field() - """The object type, which is always 'assistant.deleted'. Required. Default value is - \"assistant.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["assistant.deleted"] = "assistant.deleted" - - -class AgentsApiResponseFormat(_model_base.Model): - """An object describing the expected output of the model. If ``json_object`` only ``function`` - type ``tools`` are allowed to be passed to the Run. - If ``text`` the model can return text or any value needed. - - :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and - "json_object". 
- :vartype type: str or ~azure.ai.client.models.ApiResponseFormat - """ - - type: Optional[Union[str, "_models.ApiResponseFormat"]] = rest_field() - """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\".""" - - @overload - def __init__( - self, - *, - type: Optional[Union[str, "_models.ApiResponseFormat"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class AgentsNamedToolChoice(_model_base.Model): - """Specifies a tool the model should use. Use to force the model to call a specific tool. - - - :ivar type: the type of tool. If type is ``function``\\ , the function name must be set. - Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", - "microsoft_fabric", "sharepoint", and "azure_ai_search". - :vartype type: str or ~azure.ai.client.models.AgentsNamedToolChoiceType - :ivar function: The name of the function to call. - :vartype function: ~azure.ai.client.models.FunctionName - """ - - type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() - """the type of tool. If type is ``function``\ , the function name must be set. Required. Known - values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", - \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\".""" - function: Optional["_models.FunctionName"] = rest_field() - """The name of the function to call.""" - - @overload - def __init__( - self, - *, - type: Union[str, "_models.AgentsNamedToolChoiceType"], - function: Optional["_models.FunctionName"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class AgentThread(_model_base.Model): - """Information about a single thread associated with an agent. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread'. Required. Default value is "thread". - :vartype object: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar tool_resources: A set of resources that are made available to the agent's tools in this - thread. The resources are specific to the type - of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires a list - of vector store IDs. Required. - :vartype tool_resources: ~azure.ai.client.models.ToolResources - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. 
Required.""" - object: Literal["thread"] = rest_field() - """The object type, which is always 'thread'. Required. Default value is \"thread\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - tool_resources: "_models.ToolResources" = rest_field() - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the type - of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires a list - of vector store IDs. Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - tool_resources: "_models.ToolResources", - metadata: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread"] = "thread" - - -class AgentThreadCreationOptions(_model_base.Model): - """The details used to create a new agent thread. - - :ivar messages: The initial messages to associate with the new thread. - :vartype messages: list[~azure.ai.client.models.ThreadMessageOptions] - :ivar tool_resources: A set of resources that are made available to the agent's tools in this - thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. - :vartype tool_resources: ~azure.ai.client.models.ToolResources - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. - :vartype metadata: dict[str, str] - """ - - messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() - """The initial messages to associate with the new thread.""" - tool_resources: Optional["_models.ToolResources"] = rest_field() - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires - a list of vector store IDs.""" - metadata: Optional[Dict[str, str]] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length.""" - - @overload - def __init__( - self, - *, - messages: Optional[List["_models.ThreadMessageOptions"]] = None, - tool_resources: Optional["_models.ToolResources"] = None, - metadata: Optional[Dict[str, str]] = None, - ) -> None: ... 
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class InputData(_model_base.Model): - """Abstract data class for input data configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AppInsightsConfiguration, Dataset - - - :ivar type: Type of the data. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """Type of the data. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class AppInsightsConfiguration(InputData, discriminator="app_insights"): - """Data Source for Application Insight. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "app_insights". - :vartype type: str - :ivar resource_id: LogAnalytic Workspace resourceID associated with AppInsights. Required. - :vartype resource_id: str - :ivar query: Query to fetch the data. Required. - :vartype query: str - :ivar service_name: Service name. Required. - :vartype service_name: str - """ - - type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"app_insights\".""" - resource_id: str = rest_field(name="resourceId") - """LogAnalytic Workspace resourceID associated with AppInsights. Required.""" - query: str = rest_field() - """Query to fetch the data. Required.""" - service_name: str = rest_field(name="serviceName") - """Service name. Required.""" - - @overload - def __init__( - self, - *, - resource_id: str, - query: str, - service_name: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="app_insights", **kwargs) - - -class AzureAISearchResource(_model_base.Model): - """A set of index resources used by the ``azure_ai_search`` tool. - - :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent. - :vartype index_list: list[~azure.ai.client.models.IndexResource] - """ - - index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") - """The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent.""" - - @overload - def __init__( - self, - *, - index_list: Optional[List["_models.IndexResource"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ToolDefinition(_model_base.Model): - """An abstract representation of an input tool definition that an agent can use. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AzureAISearchToolDefinition, BingGroundingToolDefinition, CodeInterpreterToolDefinition, - FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition, - SharepointToolDefinition - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): - """The input definition information for an Azure AI search tool as used to configure an agent. - - - :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is - "azure_ai_search". - :vartype type: str - """ - - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'azure_ai_search'. Required. Default value is - \"azure_ai_search\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="azure_ai_search", **kwargs) - - -class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): - """The input definition information for a bing grounding search tool as used to configure an - agent. - - - :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is - "bing_grounding". - :vartype type: str - """ - - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_grounding'. Required. Default value is - \"bing_grounding\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="bing_grounding", **kwargs) - - -class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): - """The input definition information for a code interpreter tool as used to configure an agent. - - - :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is - "code_interpreter". - :vartype type: str - """ - - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'code_interpreter'. 
Required. Default value is - \"code_interpreter\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="code_interpreter", **kwargs) - - -class CodeInterpreterToolResource(_model_base.Model): - """A set of resources that are used by the ``code_interpreter`` tool. - - :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can - be a maximum of 20 files - associated with the tool. - :vartype file_ids: list[str] - """ - - file_ids: Optional[List[str]] = rest_field() - """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of - 20 files - associated with the tool.""" - - @overload - def __init__( - self, - *, - file_ids: Optional[List[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ConnectionListResource(_model_base.Model): - """A set of connection resources currently used by either the ``bing_grounding``\\ , - ``microsoft_fabric``\\ , or ``sharepoint`` tools. - - :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 - connection - resource attached to the agent. - :vartype connection_list: list[~azure.ai.client.models.ConnectionResource] - """ - - connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") - """The connections attached to this agent. There can be a maximum of 1 connection - resource attached to the agent.""" - - @overload - def __init__( - self, - *, - connection_list: Optional[List["_models.ConnectionResource"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ConnectionProperties(_model_base.Model): - """Connection properties. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth - - - :ivar auth_type: Authentication type of the connection target. Required. Known values are: - "ApiKey", "AAD", and "SAS". - :vartype auth_type: str or ~azure.ai.client.models.AuthenticationType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - auth_type: str = rest_discriminator(name="authType") - """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" - - -class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): - """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ - ). - - - :ivar auth_type: Authentication type of the connection target. Required. Entra ID - authentication - :vartype auth_type: str or ~azure.ai.client.models.AAD - :ivar category: Category of the connection. Required. 
Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.client.models.ConnectionType - :ivar target: The connection URL to be used for this service. Required. - :vartype target: str - """ - - auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - -class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): - """Connection properties for connections with API key authentication. - - - :ivar auth_type: Authentication type of the connection target. Required. API Key authentication - :vartype auth_type: str or ~azure.ai.client.models.API_KEY - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.client.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.client.models._models.CredentialsApiKeyAuth - :ivar target: The connection URL to be used for this service. Required. - :vartype target: str - """ - - auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() - """Credentials will only be present for authType=ApiKey. Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - -class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): - """Connection properties for connections with SAS authentication. - - - :ivar auth_type: Authentication type of the connection target. Required. Shared Access - Signature (SAS) authentication - :vartype auth_type: str or ~azure.ai.client.models.SAS - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.client.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.client.models._models.CredentialsSASAuth - :ivar target: The connection URL to be used for this service. Required. - :vartype target: str - """ - - auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. Shared Access Signature (SAS) - authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - credentials: "_models._models.CredentialsSASAuth" = rest_field() - """Credentials will only be present for authType=ApiKey. 
Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - -class ConnectionResource(_model_base.Model): - """A connection resource. - - - :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required. - :vartype connection_id: str - """ - - connection_id: str = rest_field() - """A connection in a ConnectionListResource attached to this agent. Required.""" - - @overload - def __init__( - self, - *, - connection_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ConnectionsListResponse(_model_base.Model): - """Response from the list operation. - - - :ivar value: A list of connection list secrets. Required. - :vartype value: list[~azure.ai.client.models._models.ConnectionsListSecretsResponse] - """ - - value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field() - """A list of connection list secrets. Required.""" - - -class ConnectionsListSecretsResponse(_model_base.Model): - """Response from the listSecrets operation. - - - :ivar id: A unique identifier for the connection. Required. - :vartype id: str - :ivar name: The name of the resource. Required. - :vartype name: str - :ivar properties: The properties of the resource. Required. - :vartype properties: ~azure.ai.client.models._models.ConnectionProperties - """ - - id: str = rest_field() - """A unique identifier for the connection. Required.""" - name: str = rest_field() - """The name of the resource. Required.""" - properties: "_models._models.ConnectionProperties" = rest_field() - """The properties of the resource. Required.""" - - -class CredentialsApiKeyAuth(_model_base.Model): - """The credentials needed for API key authentication. - - - :ivar key: The API key. Required. - :vartype key: str - """ - - key: str = rest_field() - """The API key. Required.""" - - -class CredentialsSASAuth(_model_base.Model): - """The credentials needed for Shared Access Signatures (SAS) authentication. - - - :ivar sas: The Shared Access Signatures (SAS) token. Required. - :vartype sas: str - """ - - sas: str = rest_field(name="SAS") - """The Shared Access Signatures (SAS) token. Required.""" - - -class Trigger(_model_base.Model): - """Abstract data class for input data configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - CronTrigger, RecurrenceTrigger - - - :ivar type: Type of the trigger. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """Type of the trigger. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class CronTrigger(Trigger, discriminator="Cron"): - """Cron Trigger Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. 
Default value is "Cron". - :vartype type: str - :ivar expression: Cron expression for the trigger. Required. - :vartype expression: str - """ - - type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"Cron\".""" - expression: str = rest_field() - """Cron expression for the trigger. Required.""" - - @overload - def __init__( - self, - *, - expression: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="Cron", **kwargs) - - -class Dataset(InputData, discriminator="dataset"): - """Dataset as source for evaluation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "dataset". - :vartype type: str - :ivar id: Evaluation input data. Required. - :vartype id: str - """ - - type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"dataset\".""" - id: str = rest_field() - """Evaluation input data. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="dataset", **kwargs) - - -class Evaluation(_model_base.Model): - """Evaluation Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: Identifier of the evaluation. Required. - :vartype id: str - :ivar data: Data for evaluation. Required. - :vartype data: ~azure.ai.client.models.InputData - :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI - Studio. It does not need to be unique. - :vartype display_name: str - :ivar description: Description of the evaluation. It can be used to store additional - information about the evaluation and is mutable. - :vartype description: str - :ivar system_data: Metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.ai.client.models.SystemData - :ivar status: Status of the evaluation. It is set by service and is read-only. - :vartype status: str - :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. - :vartype tags: dict[str, str] - :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a - property cannot be removed. - :vartype properties: dict[str, str] - :ivar evaluators: Evaluators to be used for the evaluation. Required. - :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration] - """ - - id: str = rest_field(visibility=["read"]) - """Identifier of the evaluation. Required.""" - data: "_models.InputData" = rest_field(visibility=["read", "create"]) - """Data for evaluation. Required.""" - display_name: Optional[str] = rest_field(name="displayName") - """Display Name for evaluation. It helps to find evaluation easily in AI Studio. 
It does not need - to be unique.""" - description: Optional[str] = rest_field() - """Description of the evaluation. It can be used to store additional information about the - evaluation and is mutable.""" - system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) - """Metadata containing createdBy and modifiedBy information.""" - status: Optional[str] = rest_field(visibility=["read"]) - """Status of the evaluation. It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() - """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) - """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be - removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) - """Evaluators to be used for the evaluation. Required.""" - - @overload - def __init__( - self, - *, - data: "_models.InputData", - evaluators: Dict[str, "_models.EvaluatorConfiguration"], - display_name: Optional[str] = None, - description: Optional[str] = None, - tags: Optional[Dict[str, str]] = None, - properties: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class EvaluationSchedule(_model_base.Model): # pylint: disable=too-many-instance-attributes - """Evaluation Schedule Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: Identifier of the evaluation. Required. - :vartype id: str - :ivar data: Data for evaluation. Required. - :vartype data: ~azure.ai.client.models.InputData - :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI - Studio. It does not need to be unique. - :vartype display_name: str - :ivar description: Description of the evaluation. It can be used to store additional - information about the evaluation and is mutable. - :vartype description: str - :ivar system_data: Metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.ai.client.models.SystemData - :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. - :vartype provisioning_status: str - :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. - :vartype tags: dict[str, str] - :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a - property cannot be removed. - :vartype properties: dict[str, str] - :ivar evaluators: Evaluators to be used for the evaluation. Required. - :vartype evaluators: dict[str, ~azure.ai.client.models.EvaluatorConfiguration] - :ivar trigger: Trigger for the evaluation. Required. - :vartype trigger: ~azure.ai.client.models.Trigger - :ivar sampling_strategy: Sampling strategy for the evaluation. Required. - :vartype sampling_strategy: ~azure.ai.client.models.SamplingStrategy - """ - - id: str = rest_field(visibility=["read"]) - """Identifier of the evaluation. Required.""" - data: "_models.InputData" = rest_field(visibility=["read", "create"]) - """Data for evaluation. 
Required.""" - display_name: Optional[str] = rest_field(name="displayName") - """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need - to be unique.""" - description: Optional[str] = rest_field() - """Description of the evaluation. It can be used to store additional information about the - evaluation and is mutable.""" - system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) - """Metadata containing createdBy and modifiedBy information.""" - provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"]) - """Status of the evaluation. It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() - """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) - """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be - removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) - """Evaluators to be used for the evaluation. Required.""" - trigger: "_models.Trigger" = rest_field() - """Trigger for the evaluation. Required.""" - sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") - """Sampling strategy for the evaluation. Required.""" - - @overload - def __init__( - self, - *, - data: "_models.InputData", - evaluators: Dict[str, "_models.EvaluatorConfiguration"], - trigger: "_models.Trigger", - sampling_strategy: "_models.SamplingStrategy", - display_name: Optional[str] = None, - description: Optional[str] = None, - tags: Optional[Dict[str, str]] = None, - properties: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class EvaluatorConfiguration(_model_base.Model): - """Evaluator Configuration. - - - :ivar id: Identifier of the evaluator. Required. - :vartype id: str - :ivar init_params: Initialization parameters of the evaluator. - :vartype init_params: dict[str, any] - :ivar data_mapping: Data parameters of the evaluator. - :vartype data_mapping: dict[str, str] - """ - - id: str = rest_field() - """Identifier of the evaluator. Required.""" - init_params: Optional[Dict[str, Any]] = rest_field(name="initParams") - """Initialization parameters of the evaluator.""" - data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping") - """Data parameters of the evaluator.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - init_params: Optional[Dict[str, Any]] = None, - data_mapping: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FileContentResponse(_model_base.Model): - """A response from a file get content operation. - - - :ivar content: The content of the file, in bytes. Required. 
- :vartype content: bytes - """ - - content: bytes = rest_field(format="base64") - """The content of the file, in bytes. Required.""" - - @overload - def __init__( - self, - *, - content: bytes, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FileDeletionStatus(_model_base.Model): - """A status response from a file deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'file'. Required. Default value is "file". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["file"] = rest_field() - """The object type, which is always 'file'. Required. Default value is \"file\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["file"] = "file" - - -class FileListResponse(_model_base.Model): - """The response data from a file list operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always 'list'. Required. Default value is "list". - :vartype object: str - :ivar data: The files returned for the request. Required. - :vartype data: list[~azure.ai.client.models.OpenAIFile] - """ - - object: Literal["list"] = rest_field() - """The object type, which is always 'list'. Required. Default value is \"list\".""" - data: List["_models.OpenAIFile"] = rest_field() - """The files returned for the request. Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.OpenAIFile"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): - """The input definition information for a file search tool as used to configure an agent. - - - :ivar type: The object type, which is always 'file_search'. Required. Default value is - "file_search". - :vartype type: str - :ivar file_search: Options overrides for the file search tool. - :vartype file_search: ~azure.ai.client.models.FileSearchToolDefinitionDetails - """ - - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_search'. Required. 
Default value is \"file_search\".""" - file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() - """Options overrides for the file search tool.""" - - @overload - def __init__( - self, - *, - file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="file_search", **kwargs) - - -class FileSearchToolDefinitionDetails(_model_base.Model): - """Options overrides for the file search tool. - - :ivar max_num_results: The maximum number of results the file search tool should output. The - default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 - inclusive. - - Note that the file search tool may output fewer than ``max_num_results`` results. See the file - search tool documentation for more information. - :vartype max_num_results: int - """ - - max_num_results: Optional[int] = rest_field() - """The maximum number of results the file search tool should output. The default is 20 for gpt-4* - models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive. - - Note that the file search tool may output fewer than ``max_num_results`` results. See the file - search tool documentation for more information.""" - - @overload - def __init__( - self, - *, - max_num_results: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FileSearchToolResource(_model_base.Model): - """A set of resources that are used by the ``file_search`` tool. - - :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a - maximum of 1 vector - store attached to the agent. - :vartype vector_store_ids: list[str] - """ - - vector_store_ids: Optional[List[str]] = rest_field() - """The ID of the vector store attached to this agent. There can be a maximum of 1 vector - store attached to the agent.""" - - @overload - def __init__( - self, - *, - vector_store_ids: Optional[List[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FunctionDefinition(_model_base.Model): - """The input definition information for a function. - - - :ivar name: The name of the function to be called. Required. - :vartype name: str - :ivar description: A description of what the function does, used by the model to choose when - and how to call the function. - :vartype description: str - :ivar parameters: The parameters the functions accepts, described as a JSON Schema object. - Required. - :vartype parameters: any - """ - - name: str = rest_field() - """The name of the function to be called. 
Required.""" - description: Optional[str] = rest_field() - """A description of what the function does, used by the model to choose when and how to call the - function.""" - parameters: Any = rest_field() - """The parameters the functions accepts, described as a JSON Schema object. Required.""" - - @overload - def __init__( - self, - *, - name: str, - parameters: Any, - description: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FunctionName(_model_base.Model): - """The function name that will be used, if using the ``function`` tool. - - - :ivar name: The name of the function to call. Required. - :vartype name: str - """ - - name: str = rest_field() - """The name of the function to call. Required.""" - - @overload - def __init__( - self, - *, - name: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class FunctionToolDefinition(ToolDefinition, discriminator="function"): - """The input definition information for a function tool as used to configure an agent. - - - :ivar type: The object type, which is always 'function'. Required. Default value is "function". - :vartype type: str - :ivar function: The definition of the concrete function that the function tool should call. - Required. - :vartype function: ~azure.ai.client.models.FunctionDefinition - """ - - type: Literal["function"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'function'. Required. Default value is \"function\".""" - function: "_models.FunctionDefinition" = rest_field() - """The definition of the concrete function that the function tool should call. Required.""" - - @overload - def __init__( - self, - *, - function: "_models.FunctionDefinition", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="function", **kwargs) - - -class IndexResource(_model_base.Model): - """A Index resource. - - - :ivar index_connection_id: An index connection id in an IndexResource attached to this agent. - Required. - :vartype index_connection_id: str - :ivar index_name: The name of an index in an IndexResource attached to this agent. Required. - :vartype index_name: str - """ - - index_connection_id: str = rest_field() - """An index connection id in an IndexResource attached to this agent. Required.""" - index_name: str = rest_field() - """The name of an index in an IndexResource attached to this agent. Required.""" - - @overload - def __init__( - self, - *, - index_connection_id: str, - index_name: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageAttachment(_model_base.Model): - """This describes to which tools a file has been attached. - - - :ivar file_id: The ID of the file to attach to the message. Required. - :vartype file_id: str - :ivar tools: The tools to add to this file. Required. - :vartype tools: list[~azure.ai.client.models.CodeInterpreterToolDefinition or - ~azure.ai.client.models.FileSearchToolDefinition] - """ - - file_id: str = rest_field() - """The ID of the file to attach to the message. Required.""" - tools: List["_types.MessageAttachmentToolDefinition"] = rest_field() - """The tools to add to this file. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - tools: List["_types.MessageAttachmentToolDefinition"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageContent(_model_base.Model): - """An abstract representation of a single item of thread message content. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageImageFileContent, MessageTextContent - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDelta(_model_base.Model): - """Represents the typed 'delta' payload within a streaming message delta chunk. - - - :ivar role: The entity that produced the message. Required. Known values are: "user" and - "assistant". - :vartype role: str or ~azure.ai.client.models.MessageRole - :ivar content: The content of the message as an array of text and/or images. Required. - :vartype content: list[~azure.ai.client.models.MessageDeltaContent] - """ - - role: Union[str, "_models.MessageRole"] = rest_field() - """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" - content: List["_models.MessageDeltaContent"] = rest_field() - """The content of the message as an array of text and/or images. Required.""" - - @overload - def __init__( - self, - *, - role: Union[str, "_models.MessageRole"], - content: List["_models.MessageDeltaContent"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaChunk(_model_base.Model): - """Represents a message delta i.e. any changed fields on a message during streaming. 
- - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``thread.message.delta``. Required. Default - value is "thread.message.delta". - :vartype object: str - :ivar delta: The delta containing the fields that have changed on the Message. Required. - :vartype delta: ~azure.ai.client.models.MessageDelta - """ - - id: str = rest_field() - """The identifier of the message, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message.delta"] = rest_field() - """The object type, which is always ``thread.message.delta``. Required. Default value is - \"thread.message.delta\".""" - delta: "_models.MessageDelta" = rest_field() - """The delta containing the fields that have changed on the Message. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - delta: "_models.MessageDelta", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.message.delta"] = "thread.message.delta" - - -class MessageDeltaContent(_model_base.Model): - """The abstract base representation of a partial streamed message content payload. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageDeltaImageFileContent, MessageDeltaTextContent - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the content part of the message. Required.""" - type: str = rest_discriminator(name="type") - """The type of content for this content part. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"): - """Represents a streamed image file content part within a streaming message delta chunk. - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part, which is always "image_file.". Required. - Default value is "image_file". - :vartype type: str - :ivar image_file: The image_file data. - :vartype image_file: ~azure.ai.client.models.MessageDeltaImageFileContentObject - """ - - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore - """The type of content for this content part, which is always \"image_file.\". Required. 
Default - value is \"image_file\".""" - image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field() - """The image_file data.""" - - @overload - def __init__( - self, - *, - index: int, - image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="image_file", **kwargs) - - -class MessageDeltaImageFileContentObject(_model_base.Model): - """Represents the 'image_file' payload within streaming image file content. - - :ivar file_id: The file ID of the image in the message content. - :vartype file_id: str - """ - - file_id: Optional[str] = rest_field() - """The file ID of the image in the message content.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaTextAnnotation(_model_base.Model): - """The abstract base representation of a streamed text content part's text annotation. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation - - - :ivar index: The index of the annotation within a text content part. Required. - :vartype index: int - :ivar type: The type of the text content annotation. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the annotation within a text content part. Required.""" - type: str = rest_discriminator(name="type") - """The type of the text content annotation. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"): - """Represents a streamed text content part within a streaming message delta chunk. - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part, which is always "text.". Required. - Default value is "text". - :vartype type: str - :ivar text: The text content details. - :vartype text: ~azure.ai.client.models.MessageDeltaTextContentObject - """ - - type: Literal["text"] = rest_discriminator(name="type") # type: ignore - """The type of content for this content part, which is always \"text.\". Required. Default value - is \"text\".""" - text: Optional["_models.MessageDeltaTextContentObject"] = rest_field() - """The text content details.""" - - @overload - def __init__( - self, - *, - index: int, - text: Optional["_models.MessageDeltaTextContentObject"] = None, - ) -> None: ... 
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="text", **kwargs) - - -class MessageDeltaTextContentObject(_model_base.Model): - """Represents the data of a streamed text content part within a streaming message delta chunk. - - :ivar value: The data that makes up the text. - :vartype value: str - :ivar annotations: Annotations for the text. - :vartype annotations: list[~azure.ai.client.models.MessageDeltaTextAnnotation] - """ - - value: Optional[str] = rest_field() - """The data that makes up the text.""" - annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field() - """Annotations for the text.""" - - @overload - def __init__( - self, - *, - value: Optional[str] = None, - annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"): - """Represents a streamed file citation applied to a streaming text content part. - - - :ivar index: The index of the annotation within a text content part. Required. - :vartype index: int - :ivar type: The type of the text content annotation, which is always "file_citation.". - Required. Default value is "file_citation". - :vartype type: str - :ivar file_citation: The file citation information. - :vartype file_citation: ~azure.ai.client.models.MessageDeltaTextFileCitationAnnotationObject - :ivar text: The text in the message content that needs to be replaced. - :vartype text: str - :ivar start_index: The start index of this annotation in the content text. - :vartype start_index: int - :ivar end_index: The end index of this annotation in the content text. - :vartype end_index: int - """ - - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore - """The type of the text content annotation, which is always \"file_citation.\". Required. Default - value is \"file_citation\".""" - file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field() - """The file citation information.""" - text: Optional[str] = rest_field() - """The text in the message content that needs to be replaced.""" - start_index: Optional[int] = rest_field() - """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() - """The end index of this annotation in the content text.""" - - @overload - def __init__( - self, - *, - index: int, - file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None, - text: Optional[str] = None, - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="file_citation", **kwargs) - - -class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint: disable=name-too-long - """Represents the data of a streamed file citation as applied to a streaming text content part. - - :ivar file_id: The ID of the specific file the citation is from. - :vartype file_id: str - :ivar quote: The specific quote in the cited file. - :vartype quote: str - """ - - file_id: Optional[str] = rest_field() - """The ID of the specific file the citation is from.""" - quote: Optional[str] = rest_field() - """The specific quote in the cited file.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - quote: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"): - """Represents a streamed file path annotation applied to a streaming text content part. - - - :ivar index: The index of the annotation within a text content part. Required. - :vartype index: int - :ivar type: The type of the text content annotation, which is always "file_path.". Required. - Default value is "file_path". - :vartype type: str - :ivar file_path: The file path information. - :vartype file_path: ~azure.ai.client.models.MessageDeltaTextFilePathAnnotationObject - :ivar start_index: The start index of this annotation in the content text. - :vartype start_index: int - :ivar end_index: The end index of this annotation in the content text. - :vartype end_index: int - :ivar text: The text in the message content that needs to be replaced. - :vartype text: str - """ - - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore - """The type of the text content annotation, which is always \"file_path.\". Required. Default - value is \"file_path\".""" - file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field() - """The file path information.""" - start_index: Optional[int] = rest_field() - """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() - """The end index of this annotation in the content text.""" - text: Optional[str] = rest_field() - """The text in the message content that needs to be replaced.""" - - @overload - def __init__( - self, - *, - index: int, - file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, - start_index: Optional[int] = None, - end_index: Optional[int] = None, - text: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="file_path", **kwargs) - - -class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): - """Represents the data of a streamed file path annotation as applied to a streaming text content - part. - - :ivar file_id: The file ID for the annotation. 
- :vartype file_id: str - """ - - file_id: Optional[str] = rest_field() - """The file ID for the annotation.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageImageFileContent(MessageContent, discriminator="image_file"): - """A representation of image file content in a thread message. - - - :ivar type: The object type, which is always 'image_file'. Required. Default value is - "image_file". - :vartype type: str - :ivar image_file: The image file for this thread message content item. Required. - :vartype image_file: ~azure.ai.client.models.MessageImageFileDetails - """ - - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" - image_file: "_models.MessageImageFileDetails" = rest_field() - """The image file for this thread message content item. Required.""" - - @overload - def __init__( - self, - *, - image_file: "_models.MessageImageFileDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="image_file", **kwargs) - - -class MessageImageFileDetails(_model_base.Model): - """An image reference, as represented in thread message content. - - - :ivar file_id: The ID for the file associated with this image. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID for the file associated with this image. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageIncompleteDetails(_model_base.Model): - """Information providing additional detail about a message entering an incomplete status. - - - :ivar reason: The provided reason describing why the message was marked as incomplete. - Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and - "run_expired". - :vartype reason: str or ~azure.ai.client.models.MessageIncompleteDetailsReason - """ - - reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() - """The provided reason describing why the message was marked as incomplete. Required. Known values - are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and - \"run_expired\".""" - - @overload - def __init__( - self, - *, - reason: Union[str, "_models.MessageIncompleteDetailsReason"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageTextAnnotation(_model_base.Model): - """An abstract representation of an annotation to text thread message content. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - text: str = rest_field() - """The textual content associated with this text annotation item. Required.""" - - @overload - def __init__( - self, - *, - type: str, - text: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageTextContent(MessageContent, discriminator="text"): - """A representation of a textual item of thread message content. - - - :ivar type: The object type, which is always 'text'. Required. Default value is "text". - :vartype type: str - :ivar text: The text and associated annotations for this thread message content item. Required. - :vartype text: ~azure.ai.client.models.MessageTextDetails - """ - - type: Literal["text"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'text'. Required. Default value is \"text\".""" - text: "_models.MessageTextDetails" = rest_field() - """The text and associated annotations for this thread message content item. Required.""" - - @overload - def __init__( - self, - *, - text: "_models.MessageTextDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="text", **kwargs) - - -class MessageTextDetails(_model_base.Model): - """The text and associated annotations for a single item of agent thread message content. - - - :ivar value: The text data. Required. - :vartype value: str - :ivar annotations: A list of annotations associated with this text. Required. - :vartype annotations: list[~azure.ai.client.models.MessageTextAnnotation] - """ - - value: str = rest_field() - """The text data. Required.""" - annotations: List["_models.MessageTextAnnotation"] = rest_field() - """A list of annotations associated with this text. Required.""" - - @overload - def __init__( - self, - *, - value: str, - annotations: List["_models.MessageTextAnnotation"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): - """A citation within the message that points to a specific quote from a specific File associated - with the agent or the message. Generated when the agent uses the 'file_search' tool to search - files. - - - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - :ivar type: The object type, which is always 'file_citation'. Required. Default value is - "file_citation". - :vartype type: str - :ivar file_citation: A citation within the message that points to a specific quote from a - specific file. - Generated when the agent uses the "file_search" tool to search files. Required. - :vartype file_citation: ~azure.ai.client.models.MessageTextFileCitationDetails - :ivar start_index: The first text index associated with this text annotation. - :vartype start_index: int - :ivar end_index: The last text index associated with this text annotation. - :vartype end_index: int - """ - - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" - file_citation: "_models.MessageTextFileCitationDetails" = rest_field() - """A citation within the message that points to a specific quote from a specific file. - Generated when the agent uses the \"file_search\" tool to search files. Required.""" - start_index: Optional[int] = rest_field() - """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() - """The last text index associated with this text annotation.""" - - @overload - def __init__( - self, - *, - text: str, - file_citation: "_models.MessageTextFileCitationDetails", - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="file_citation", **kwargs) - - -class MessageTextFileCitationDetails(_model_base.Model): - """A representation of a file-based text citation, as used in a file-based annotation of text - thread message content. - - - :ivar file_id: The ID of the file associated with this citation. Required. - :vartype file_id: str - :ivar quote: The specific quote cited in the associated file. Required. - :vartype quote: str - """ - - file_id: str = rest_field() - """The ID of the file associated with this citation. Required.""" - quote: str = rest_field() - """The specific quote cited in the associated file. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - quote: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): - """A citation within the message that points to a file located at a specific path. - - - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - :ivar type: The object type, which is always 'file_path'. Required. Default value is - "file_path". - :vartype type: str - :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter - tool to generate a file. Required. - :vartype file_path: ~azure.ai.client.models.MessageTextFilePathDetails - :ivar start_index: The first text index associated with this text annotation. - :vartype start_index: int - :ivar end_index: The last text index associated with this text annotation. - :vartype end_index: int - """ - - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" - file_path: "_models.MessageTextFilePathDetails" = rest_field() - """A URL for the file that's generated when the agent used the code_interpreter tool to generate a - file. Required.""" - start_index: Optional[int] = rest_field() - """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() - """The last text index associated with this text annotation.""" - - @overload - def __init__( - self, - *, - text: str, - file_path: "_models.MessageTextFilePathDetails", - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="file_path", **kwargs) - - -class MessageTextFilePathDetails(_model_base.Model): - """An encapsulation of a file ID, as used by message file path annotations. - - - :ivar file_id: The ID of the specific file that the citation is from. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID of the specific file that the citation is from. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"): - """The input definition information for a Microsoft Fabric tool as used to configure an agent. - - - :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is - "microsoft_fabric". - :vartype type: str - """ - - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - - @overload - def __init__( - self, - ) -> None: ...
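# --- Editor's note: usage sketch, not part of the generated patch. ---
# Demonstrates walking the annotation models above on a completed text content
# item: file citation annotations resolve to a quoted source file, file path
# annotations resolve to a file generated by the code_interpreter tool. The
# import path assumes these models are re-exported from azure.ai.client.models.
from azure.ai.client.models import (
    MessageTextContent,
    MessageTextFileCitationAnnotation,
    MessageTextFilePathAnnotation,
)

def describe_annotations(content: MessageTextContent) -> None:
    # content.text is a MessageTextDetails holding the value and its annotations.
    for annotation in content.text.annotations:
        if isinstance(annotation, MessageTextFileCitationAnnotation):
            # The annotated span (annotation.text) cites a quote from a file.
            print(f"{annotation.text!r} cites file {annotation.file_citation.file_id}")
        elif isinstance(annotation, MessageTextFilePathAnnotation):
            # The annotated span points at a generated file.
            print(f"{annotation.text!r} -> generated file {annotation.file_path.file_id}")
# --- End editor's note. ---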
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="microsoft_fabric", **kwargs) - - -class OpenAIFile(_model_base.Model): - """Represents a file that has been uploaded to the service. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always 'file'. Required. Default value is "file". - :vartype object: str - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar bytes: The size of the file, in bytes. Required. - :vartype bytes: int - :ivar filename: The name of the file. Required. - :vartype filename: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune", - "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". - :vartype purpose: str or ~azure.ai.client.models.FilePurpose - :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values - are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted". - :vartype status: str or ~azure.ai.client.models.FileState - :ivar status_details: The error message with details in case processing of this file failed. - This field is available in Azure OpenAI only. - :vartype status_details: str - """ - - object: Literal["file"] = rest_field() - """The object type, which is always 'file'. Required. Default value is \"file\".""" - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - bytes: int = rest_field() - """The size of the file, in bytes. Required.""" - filename: str = rest_field() - """The name of the file. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - purpose: Union[str, "_models.FilePurpose"] = rest_field() - """The intended purpose of a file. Required. Known values are: \"fine-tune\", - \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and - \"vision\".""" - status: Optional[Union[str, "_models.FileState"]] = rest_field() - """The state of the file. This field is available in Azure OpenAI only. Known values are: - \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and - \"deleted\".""" - status_details: Optional[str] = rest_field() - """The error message with details in case processing of this file failed. This field is available - in Azure OpenAI only.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - bytes: int, - filename: str, - created_at: datetime.datetime, - purpose: Union[str, "_models.FilePurpose"], - status: Optional[Union[str, "_models.FileState"]] = None, - status_details: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["file"] = "file" - - -class OpenAIPageableListOfAgent(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.Agent] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.Agent"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.Agent"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfRunStep(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.RunStep] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.RunStep"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.RunStep"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfThreadMessage(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.ThreadMessage] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadMessage"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.ThreadMessage"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfThreadRun(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.ThreadRun] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadRun"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.ThreadRun"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... 
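
All of these OpenAIPageableListOf* models share the same cursor shape: a data list bracketed by first_id/last_id bookends plus a has_more flag. A minimal consumer-side sketch of walking such a list follows; the list_messages operation and its after parameter are assumed here for illustration only and are not defined in this patch.

# Sketch only: "client.list_messages" and its "after" cursor parameter are
# assumed for illustration; only the page model's fields come from this patch.
def iter_all_messages(client, thread_id):
    after = None
    while True:
        page = client.list_messages(thread_id=thread_id, after=after)
        for message in page.data:
            yield message
        if not page.has_more:
            break
        after = page.last_id  # last_id is the cursor for the next page
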
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfVectorStore(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.VectorStore] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStore"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.VectorStore"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfVectorStoreFile(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.client.models.VectorStoreFile] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStoreFile"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. 
- Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.VectorStoreFile"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class RecurrenceSchedule(_model_base.Model): - """RecurrenceSchedule Definition. - - - :ivar hours: List of hours for the schedule. Required. - :vartype hours: list[int] - :ivar minutes: List of minutes for the schedule. Required. - :vartype minutes: list[int] - :ivar week_days: List of days for the schedule. Required. - :vartype week_days: list[str or ~azure.ai.client.models.WeekDays] - :ivar month_days: List of month days for the schedule. Required. - :vartype month_days: list[int] - """ - - hours: List[int] = rest_field() - """List of hours for the schedule. Required.""" - minutes: List[int] = rest_field() - """List of minutes for the schedule. Required.""" - week_days: List[Union[str, "_models.WeekDays"]] = rest_field(name="weekDays") - """List of days for the schedule. Required.""" - month_days: List[int] = rest_field(name="monthDays") - """List of month days for the schedule. Required.""" - - @overload - def __init__( - self, - *, - hours: List[int], - minutes: List[int], - week_days: List[Union[str, "_models.WeekDays"]], - month_days: List[int], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RecurrenceTrigger(Trigger, discriminator="Recurrence"): - """Recurrence Trigger Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "Recurrence". - :vartype type: str - :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", - "Week", "Day", "Hour", and "Minute". - :vartype frequency: str or ~azure.ai.client.models.Frequency - :ivar interval: Specifies schedule interval in conjunction with frequency. Required. - :vartype interval: int - :ivar schedule: The recurrence schedule. Required. - :vartype schedule: ~azure.ai.client.models.RecurrenceSchedule - """ - - type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"Recurrence\".""" - frequency: Union[str, "_models.Frequency"] = rest_field() - """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", - \"Hour\", and \"Minute\".""" - interval: int = rest_field() - """Specifies schedule interval in conjunction with frequency. Required.""" - schedule: "_models.RecurrenceSchedule" = rest_field() - """The recurrence schedule. Required.""" - - @overload - def __init__( - self, - *, - frequency: Union[str, "_models.Frequency"], - interval: int, - schedule: "_models.RecurrenceSchedule", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="Recurrence", **kwargs)
-
-
-class RequiredAction(_model_base.Model):
-    """An abstract representation of a required action for an agent thread run to continue.
-
-    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
-    SubmitToolOutputsAction
-
-
-    :ivar type: The object type. Required. Default value is None.
-    :vartype type: str
-    """
-
-    __mapping__: Dict[str, _model_base.Model] = {}
-    type: str = rest_discriminator(name="type")
-    """The object type. Required. Default value is None."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        type: str,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RequiredToolCall(_model_base.Model):
-    """An abstract representation of a tool invocation needed by the model to continue a run.
-
-    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
-    RequiredFunctionToolCall
-
-
-    :ivar type: The object type for the required tool call. Required. Default value is None.
-    :vartype type: str
-    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
-     Required.
-    :vartype id: str
-    """
-
-    __mapping__: Dict[str, _model_base.Model] = {}
-    type: str = rest_discriminator(name="type")
-    """The object type for the required tool call. Required. Default value is None."""
-    id: str = rest_field()
-    """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        type: str,
-        id: str,  # pylint: disable=redefined-builtin
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"):
-    """A representation of a requested call to a function tool, needed by the model to continue
-    evaluation of a run.
-
-
-    :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs.
-     Required.
-    :vartype id: str
-    :ivar type: The object type of the required tool call. Always 'function' for function tools.
-     Required. Default value is "function".
-    :vartype type: str
-    :ivar function: Detailed information about the function to be executed by the tool that
-     includes name and arguments. Required.
-    :vartype function: ~azure.ai.client.models.RequiredFunctionToolCallDetails
-    """
-
-    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
-    """The object type of the required tool call. Always 'function' for function tools. Required.
-     Default value is \"function\"."""
-    function: "_models.RequiredFunctionToolCallDetails" = rest_field()
-    """Detailed information about the function to be executed by the tool that includes name and
-     arguments.
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - function: "_models.RequiredFunctionToolCallDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="function", **kwargs) - - -class RequiredFunctionToolCallDetails(_model_base.Model): - """The detailed information for a function invocation, as provided by a required action invoking a - function tool, that includes the name of and arguments to the function. - - - :ivar name: The name of the function. Required. - :vartype name: str - :ivar arguments: The arguments to use when invoking the named function, as provided by the - model. Arguments are presented as a JSON document that should be validated and parsed for - evaluation. Required. - :vartype arguments: str - """ - - name: str = rest_field() - """The name of the function. Required.""" - arguments: str = rest_field() - """The arguments to use when invoking the named function, as provided by the model. Arguments are - presented as a JSON document that should be validated and parsed for evaluation. Required.""" - - @overload - def __init__( - self, - *, - name: str, - arguments: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunCompletionUsage(_model_base.Model): - """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). - - - :ivar completion_tokens: Number of completion tokens used over the course of the run. Required. - :vartype completion_tokens: int - :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required. - :vartype prompt_tokens: int - :ivar total_tokens: Total number of tokens used (prompt + completion). Required. - :vartype total_tokens: int - """ - - completion_tokens: int = rest_field() - """Number of completion tokens used over the course of the run. Required.""" - prompt_tokens: int = rest_field() - """Number of prompt tokens used over the course of the run. Required.""" - total_tokens: int = rest_field() - """Total number of tokens used (prompt + completion). Required.""" - - @overload - def __init__( - self, - *, - completion_tokens: int, - prompt_tokens: int, - total_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunError(_model_base.Model): - """The details of an error as encountered by an agent thread run. - - - :ivar code: The status for the error. Required. - :vartype code: str - :ivar message: The human-readable text associated with the error. Required. - :vartype message: str - """ - - code: str = rest_field() - """The status for the error. 
Required.""" - message: str = rest_field() - """The human-readable text associated with the error. Required.""" - - @overload - def __init__( - self, - *, - code: str, - message: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStep(_model_base.Model): # pylint: disable=too-many-instance-attributes - """Detailed information about a single step of an agent thread run. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is - "thread.run.step". - :vartype object: str - :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. - Known values are: "message_creation" and "tool_calls". - :vartype type: str or ~azure.ai.client.models.RunStepType - :ivar assistant_id: The ID of the agent associated with the run step. Required. - :vartype assistant_id: str - :ivar thread_id: The ID of the thread that was run. Required. - :vartype thread_id: str - :ivar run_id: The ID of the run that this run step is a part of. Required. - :vartype run_id: str - :ivar status: The status of this run step. Required. Known values are: "in_progress", - "cancelled", "failed", "completed", and "expired". - :vartype status: str or ~azure.ai.client.models.RunStepStatus - :ivar step_details: The details for this run step. Required. - :vartype step_details: ~azure.ai.client.models.RunStepDetails - :ivar last_error: If applicable, information about the last error encountered by this run step. - Required. - :vartype last_error: ~azure.ai.client.models.RunStepError - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. - Required. - :vartype expired_at: ~datetime.datetime - :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. - :vartype completed_at: ~datetime.datetime - :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. - Required. - :vartype cancelled_at: ~datetime.datetime - :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. - :vartype failed_at: ~datetime.datetime - :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the - run step's status is ``in_progress``. - :vartype usage: ~azure.ai.client.models.RunStepCompletionUsage - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run.step"] = rest_field() - """The object type, which is always 'thread.run.step'. Required. 
Default value is - \"thread.run.step\".""" - type: Union[str, "_models.RunStepType"] = rest_field() - """The type of run step, which can be either message_creation or tool_calls. Required. Known - values are: \"message_creation\" and \"tool_calls\".""" - assistant_id: str = rest_field() - """The ID of the agent associated with the run step. Required.""" - thread_id: str = rest_field() - """The ID of the thread that was run. Required.""" - run_id: str = rest_field() - """The ID of the run that this run step is a part of. Required.""" - status: Union[str, "_models.RunStepStatus"] = rest_field() - """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", - \"failed\", \"completed\", and \"expired\".""" - step_details: "_models.RunStepDetails" = rest_field() - """The details for this run step. Required.""" - last_error: "_models.RunStepError" = rest_field() - """If applicable, information about the last error encountered by this run step. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - expired_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this item expired. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this completed. Required.""" - cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" - failed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this failed. Required.""" - usage: Optional["_models.RunStepCompletionUsage"] = rest_field() - """Usage statistics related to the run step. This value will be ``null`` while the run step's - status is ``in_progress``.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - type: Union[str, "_models.RunStepType"], - assistant_id: str, - thread_id: str, - run_id: str, - status: Union[str, "_models.RunStepStatus"], - step_details: "_models.RunStepDetails", - last_error: "_models.RunStepError", - created_at: datetime.datetime, - expired_at: datetime.datetime, - completed_at: datetime.datetime, - cancelled_at: datetime.datetime, - failed_at: datetime.datetime, - metadata: Dict[str, str], - usage: Optional["_models.RunStepCompletionUsage"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.run.step"] = "thread.run.step" - - -class RunStepToolCall(_model_base.Model): - """An abstract representation of a detailed tool call as recorded within a run step for an - existing run. - - You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: - RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, - RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, - RunStepSharepointToolCall - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - id: str = rest_field() - """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" - - @overload - def __init__( - self, - *, - type: str, - id: str, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"): - """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined - tool, that represents - executed Azure AI search. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is - "azure_ai_search". - :vartype type: str - :ivar azure_ai_search: Reserved for future use. Required. - :vartype azure_ai_search: dict[str, str] - """ - - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'azure_ai_search'. Required. Default value is - \"azure_ai_search\".""" - azure_ai_search: Dict[str, str] = rest_field() - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - azure_ai_search: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="azure_ai_search", **kwargs) - - -class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): - """A record of a call to a bing grounding tool, issued by the model in evaluation of a defined - tool, that represents - executed search with bing grounding. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is - "bing_grounding". - :vartype type: str - :ivar bing_grounding: Reserved for future use. Required. - :vartype bing_grounding: dict[str, str] - """ - - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_grounding'. Required. Default value is - \"bing_grounding\".""" - bing_grounding: Dict[str, str] = rest_field() - """Reserved for future use. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - bing_grounding: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="bing_grounding", **kwargs) - - -class RunStepCodeInterpreterToolCallOutput(_model_base.Model): - """An abstract representation of an emitted output from a code interpreter tool. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): - """A representation of an image output emitted by a code interpreter tool in response to a tool - call by the model. - - - :ivar type: The object type, which is always 'image'. Required. Default value is "image". - :vartype type: str - :ivar image: Referential information for the image associated with this output. Required. - :vartype image: ~azure.ai.client.models.RunStepCodeInterpreterImageReference - """ - - type: Literal["image"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'image'. Required. Default value is \"image\".""" - image: "_models.RunStepCodeInterpreterImageReference" = rest_field() - """Referential information for the image associated with this output. Required.""" - - @overload - def __init__( - self, - *, - image: "_models.RunStepCodeInterpreterImageReference", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="image", **kwargs) - - -class RunStepCodeInterpreterImageReference(_model_base.Model): - """An image reference emitted by a code interpreter tool in response to a tool call by the model. - - - :ivar file_id: The ID of the file associated with this image. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID of the file associated with this image. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): - """A representation of a log output emitted by a code interpreter tool in response to a tool call - by the model. - - - :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". - :vartype type: str - :ivar logs: The serialized log output emitted by the code interpreter. Required. - :vartype logs: str - """ - - type: Literal["logs"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'logs'. Required. Default value is \"logs\".""" - logs: str = rest_field() - """The serialized log output emitted by the code interpreter. Required.""" - - @overload - def __init__( - self, - *, - logs: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="logs", **kwargs) - - -class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): - """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined - tool, that - represents inputs and outputs consumed and emitted by the code interpreter. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is - "code_interpreter". - :vartype type: str - :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. - :vartype code_interpreter: ~azure.ai.client.models.RunStepCodeInterpreterToolCallDetails - """ - - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'code_interpreter'. Required. Default value is - \"code_interpreter\".""" - code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() - """The details of the tool call to the code interpreter tool. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="code_interpreter", **kwargs) - - -class RunStepCodeInterpreterToolCallDetails(_model_base.Model): - """The detailed information about a code interpreter invocation by the model. - - - :ivar input: The input provided by the model to the code interpreter tool. Required. - :vartype input: str - :ivar outputs: The outputs produced by the code interpreter tool back to the model in response - to the tool call. Required. - :vartype outputs: list[~azure.ai.client.models.RunStepCodeInterpreterToolCallOutput] - """ - - input: str = rest_field() - """The input provided by the model to the code interpreter tool. 
Required.""" - outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() - """The outputs produced by the code interpreter tool back to the model in response to the tool - call. Required.""" - - @overload - def __init__( - self, - *, - input: str, - outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepCompletionUsage(_model_base.Model): - """Usage statistics related to the run step. - - - :ivar completion_tokens: Number of completion tokens used over the course of the run step. - Required. - :vartype completion_tokens: int - :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. - :vartype prompt_tokens: int - :ivar total_tokens: Total number of tokens used (prompt + completion). Required. - :vartype total_tokens: int - """ - - completion_tokens: int = rest_field() - """Number of completion tokens used over the course of the run step. Required.""" - prompt_tokens: int = rest_field() - """Number of prompt tokens used over the course of the run step. Required.""" - total_tokens: int = rest_field() - """Total number of tokens used (prompt + completion). Required.""" - - @overload - def __init__( - self, - *, - completion_tokens: int, - prompt_tokens: int, - total_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepDelta(_model_base.Model): - """Represents the delta payload in a streaming run step delta chunk. - - :ivar step_details: The details of the run step. - :vartype step_details: ~azure.ai.client.models.RunStepDeltaDetail - """ - - step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() - """The details of the run step.""" - - @overload - def __init__( - self, - *, - step_details: Optional["_models.RunStepDeltaDetail"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepDeltaChunk(_model_base.Model): - """Represents a run step delta i.e. any changed fields on a run step during streaming. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default - value is "thread.run.step.delta". - :vartype object: str - :ivar delta: The delta containing the fields that have changed on the run step. Required. - :vartype delta: ~azure.ai.client.models.RunStepDelta - """ - - id: str = rest_field() - """The identifier of the run step, which can be referenced in API endpoints. 
Required.""" - object: Literal["thread.run.step.delta"] = rest_field() - """The object type, which is always ``thread.run.step.delta``. Required. Default value is - \"thread.run.step.delta\".""" - delta: "_models.RunStepDelta" = rest_field() - """The delta containing the fields that have changed on the run step. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - delta: "_models.RunStepDelta", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta" - - -class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: disable=name-too-long - """Represents the Code Interpreter tool call data in a streaming run step's tool calls. - - :ivar input: The input into the Code Interpreter tool call. - :vartype input: str - :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one - or more - items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these are - represented by a - different object type. - :vartype outputs: list[~azure.ai.client.models.RunStepDeltaCodeInterpreterOutput] - """ - - input: Optional[str] = rest_field() - """The input into the Code Interpreter tool call.""" - outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() - """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented - by a - different object type.""" - - @overload - def __init__( - self, - *, - input: Optional[str] = None, - outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class RunStepDeltaCodeInterpreterOutput(_model_base.Model): - """The abstract base representation of a streaming run step tool call's Code Interpreter tool - output. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput - - - :ivar index: The index of the output in the streaming run step tool call's Code Interpreter - outputs array. Required. - :vartype index: int - :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required. - Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the output in the streaming run step tool call's Code Interpreter outputs array. - Required.""" - type: str = rest_discriminator(name="type") - """The type of the streaming run step tool call's Code Interpreter output. Required. Default value - is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"):
-    """Represents an image output as produced by the Code Interpreter tool and as represented in a
-    streaming run step's delta tool calls collection.
-
-
-    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
-     outputs array. Required.
-    :vartype index: int
-    :ivar type: The object type, which is always "image". Required. Default value is "image".
-    :vartype type: str
-    :ivar image: The image data for the Code Interpreter tool call output.
-    :vartype image: ~azure.ai.client.models.RunStepDeltaCodeInterpreterImageOutputObject
-    """
-
-    type: Literal["image"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"image\". Required. Default value is \"image\"."""
-    image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field()
-    """The image data for the Code Interpreter tool call output."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="image", **kwargs)
-
-
-class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model):  # pylint: disable=name-too-long
-    """Represents the data for a streaming run step's Code Interpreter tool call image output.
-
-    :ivar file_id: The file ID for the image.
-    :vartype file_id: str
-    """
-
-    file_id: Optional[str] = rest_field()
-    """The file ID for the image."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        file_id: Optional[str] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"):
-    """Represents a log output as produced by the Code Interpreter tool and as represented in a
-    streaming run step's delta tool calls collection.
-
-
-    :ivar index: The index of the output in the streaming run step tool call's Code Interpreter
-     outputs array. Required.
-    :vartype index: int
-    :ivar type: The type of the object, which is always "logs". Required. Default value is "logs".
-    :vartype type: str
-    :ivar logs: The text output from the Code Interpreter tool call.
-    :vartype logs: str
-    """
-
-    type: Literal["logs"] = rest_discriminator(name="type")  # type: ignore
-    """The type of the object, which is always \"logs\". Required. Default value is \"logs\"."""
-    logs: Optional[str] = rest_field()
-    """The text output from the Code Interpreter tool call."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        logs: Optional[str] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="logs", **kwargs)
-
-
-class RunStepDeltaToolCall(_model_base.Model):
-    """The abstract base representation of a single tool call within a streaming run step's delta tool
-    call details.
-
-    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
-    RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall,
-    RunStepDeltaFunctionToolCall
-
-
-    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
-    :vartype index: int
-    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
-    :vartype id: str
-    :ivar type: The type of the tool call detail item in a streaming run step's details. Required.
-     Default value is None.
-    :vartype type: str
-    """
-
-    __mapping__: Dict[str, _model_base.Model] = {}
-    index: int = rest_field()
-    """The index of the tool call detail in the run step's tool_calls array. Required."""
-    id: str = rest_field()
-    """The ID of the tool call, used when submitting outputs to the run. Required."""
-    type: str = rest_discriminator(name="type")
-    """The type of the tool call detail item in a streaming run step's details. Required. Default
-     value is None."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        id: str,  # pylint: disable=redefined-builtin
-        type: str,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"):
-    """Represents a Code Interpreter tool call within a streaming run step's tool call details.
-
-
-    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
-    :vartype index: int
-    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
-    :vartype id: str
-    :ivar type: The object type, which is always "code_interpreter". Required. Default value is
-     "code_interpreter".
-    :vartype type: str
-    :ivar code_interpreter: The Code Interpreter data for the tool call.
-    :vartype code_interpreter: ~azure.ai.client.models.RunStepDeltaCodeInterpreterDetailItemObject
-    """
-
-    type: Literal["code_interpreter"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"code_interpreter\". Required. Default value is
-     \"code_interpreter\"."""
-    code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field()
-    """The Code Interpreter data for the tool call."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        id: str,  # pylint: disable=redefined-builtin
-        code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="code_interpreter", **kwargs)
-
-
-class RunStepDeltaDetail(_model_base.Model):
-    """Represents a single run step detail item in a streaming run step's delta payload.
-
-    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
-    RunStepDeltaMessageCreation, RunStepDeltaToolCallObject
-
-
-    :ivar type: The object type for the run step detail object. Required. Default value is None.
-    :vartype type: str
-    """
-
-    __mapping__: Dict[str, _model_base.Model] = {}
-    type: str = rest_discriminator(name="type")
-    """The object type for the run step detail object. Required. Default value is None."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        type: str,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"):
-    """Represents a file search tool call within a streaming run step's tool call details.
-
-
-    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
-    :vartype index: int
-    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
-    :vartype id: str
-    :ivar type: The object type, which is always "file_search". Required. Default value is
-     "file_search".
-    :vartype type: str
-    :ivar file_search: Reserved for future use.
-    :vartype file_search: dict[str, str]
-    """
-
-    type: Literal["file_search"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"file_search\". Required. Default value is \"file_search\"."""
-    file_search: Optional[Dict[str, str]] = rest_field()
-    """Reserved for future use."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        id: str,  # pylint: disable=redefined-builtin
-        file_search: Optional[Dict[str, str]] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="file_search", **kwargs)
-
-
-class RunStepDeltaFunction(_model_base.Model):
-    """Represents the function data in a streaming run step delta's function tool call.
-
-    :ivar name: The name of the function.
-    :vartype name: str
-    :ivar arguments: The arguments passed to the function as input.
-    :vartype arguments: str
-    :ivar output: The output of the function, null if outputs have not yet been submitted.
-    :vartype output: str
-    """
-
-    name: Optional[str] = rest_field()
-    """The name of the function."""
-    arguments: Optional[str] = rest_field()
-    """The arguments passed to the function as input."""
-    output: Optional[str] = rest_field()
-    """The output of the function, null if outputs have not yet been submitted."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        name: Optional[str] = None,
-        arguments: Optional[str] = None,
-        output: Optional[str] = None,
-    ) -> None: ...
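
Every model in this module exposes the same pair of constructor overloads shown above: keyword arguments for typed construction, or a single mapping of raw JSON. A quick sketch using RunStepDeltaFunction, whose fields are all optional; the "get_weather" function name is an invented example:

from azure.ai.client.models import RunStepDeltaFunction

# Keyword-style construction.
f1 = RunStepDeltaFunction(name="get_weather", arguments='{"city": "Paris"}')
# Mapping-style construction from raw JSON ("get_weather" is an invented name).
f2 = RunStepDeltaFunction({"name": "get_weather", "arguments": '{"city": "Paris"}'})
assert f1.name == f2.name == "get_weather"
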
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"):
-    """Represents a function tool call within a streaming run step's tool call details.
-
-
-    :ivar index: The index of the tool call detail in the run step's tool_calls array. Required.
-    :vartype index: int
-    :ivar id: The ID of the tool call, used when submitting outputs to the run. Required.
-    :vartype id: str
-    :ivar type: The object type, which is always "function". Required. Default value is
-     "function".
-    :vartype type: str
-    :ivar function: The function data for the tool call.
-    :vartype function: ~azure.ai.client.models.RunStepDeltaFunction
-    """
-
-    type: Literal["function"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"function\". Required. Default value is \"function\"."""
-    function: Optional["_models.RunStepDeltaFunction"] = rest_field()
-    """The function data for the tool call."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        index: int,
-        id: str,  # pylint: disable=redefined-builtin
-        function: Optional["_models.RunStepDeltaFunction"] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="function", **kwargs)
-
-
-class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"):
-    """Represents a message creation within a streaming run step delta.
-
-
-    :ivar type: The object type, which is always "message_creation". Required. Default value is
-     "message_creation".
-    :vartype type: str
-    :ivar message_creation: The message creation data.
-    :vartype message_creation: ~azure.ai.client.models.RunStepDeltaMessageCreationObject
-    """
-
-    type: Literal["message_creation"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"message_creation\". Required. Default value is
-     \"message_creation\"."""
-    message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field()
-    """The message creation data."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="message_creation", **kwargs)
-
-
-class RunStepDeltaMessageCreationObject(_model_base.Model):
-    """Represents the data within a streaming run step message creation response object.
-
-    :ivar message_id: The ID of the newly-created message.
-    :vartype message_id: str
-    """
-
-    message_id: Optional[str] = rest_field()
-    """The ID of the newly-created message."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        message_id: Optional[str] = None,
-    ) -> None: ...
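
Because RunStepDeltaFunction.arguments arrives as fragments spread across successive RunStepDeltaChunk payloads during streaming, a consumer typically concatenates the fragments per tool call index before parsing the JSON. A minimal sketch, assuming delta_chunks is an already-deserialized iterable of RunStepDeltaChunk objects (no streaming transport is shown):

# Sketch only: "delta_chunks" is an assumed iterable of already-deserialized
# RunStepDeltaChunk objects.
def collect_function_arguments(delta_chunks):
    buffers = {}  # tool call index -> concatenated JSON argument fragments
    for chunk in delta_chunks:
        details = chunk.delta.step_details
        if details is None or details.type != "tool_calls":
            continue
        for call in details.tool_calls or []:
            if call.type == "function" and call.function and call.function.arguments:
                buffers[call.index] = buffers.get(call.index, "") + call.function.arguments
    return buffers  # each value should now be a parseable JSON document
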
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"):
-    """Represents an invocation of tool calls as part of a streaming run step.
-
-
-    :ivar type: The object type, which is always "tool_calls". Required. Default value is
-     "tool_calls".
-    :vartype type: str
-    :ivar tool_calls: The collection of tool calls for the tool call detail item.
-    :vartype tool_calls: list[~azure.ai.client.models.RunStepDeltaToolCall]
-    """
-
-    type: Literal["tool_calls"] = rest_discriminator(name="type")  # type: ignore
-    """The object type, which is always \"tool_calls\". Required. Default value is \"tool_calls\"."""
-    tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field()
-    """The collection of tool calls for the tool call detail item."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, type="tool_calls", **kwargs)
-
-
-class RunStepDetails(_model_base.Model):
-    """An abstract representation of the details for a run step.
-
-    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
-    RunStepMessageCreationDetails, RunStepToolCallDetails
-
-
-    :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls".
-    :vartype type: str or ~azure.ai.client.models.RunStepType
-    """
-
-    __mapping__: Dict[str, _model_base.Model] = {}
-    type: str = rest_discriminator(name="type")
-    """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\"."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        type: str,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
-        :type mapping: Mapping[str, Any]
-        """
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:  # pylint: disable=useless-super-delegation
-        super().__init__(*args, **kwargs)
-
-
-class RunStepError(_model_base.Model):
-    """The error information associated with a failed run step.
-
-
-    :ivar code: The error code for this error. Required. Known values are: "server_error" and
-     "rate_limit_exceeded".
-    :vartype code: str or ~azure.ai.client.models.RunStepErrorCode
-    :ivar message: The human-readable text associated with this error. Required.
-    :vartype message: str
-    """
-
-    code: Union[str, "_models.RunStepErrorCode"] = rest_field()
-    """The error code for this error. Required. Known values are: \"server_error\" and
-     \"rate_limit_exceeded\"."""
-    message: str = rest_field()
-    """The human-readable text associated with this error. Required."""
-
-    @overload
-    def __init__(
-        self,
-        *,
-        code: Union[str, "_models.RunStepErrorCode"],
-        message: str,
-    ) -> None: ...
-
-    @overload
-    def __init__(self, mapping: Mapping[str, Any]) -> None:
-        """
-        :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"):
- """A record of a call to a file search tool, issued by the model in evaluation of a defined tool,
- that represents
- executed file search.
-
-
- :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
- Required.
- :vartype id: str
- :ivar type: The object type, which is always 'file_search'. Required. Default value is
- "file_search".
- :vartype type: str
- :ivar file_search: Reserved for future use. Required.
- :vartype file_search: dict[str, str]
- """
-
- type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore
- """The object type, which is always 'file_search'. Required. Default value is \"file_search\"."""
- file_search: Dict[str, str] = rest_field()
- """Reserved for future use. Required."""
-
- @overload
- def __init__(
- self,
- *,
- id: str, # pylint: disable=redefined-builtin
- file_search: Dict[str, str],
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, type="file_search", **kwargs)
-
-
-class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"):
- """A record of a call to a function tool, issued by the model in evaluation of a defined tool,
- that represents the inputs
- and output consumed and emitted by the specified function.
-
-
- :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
- Required.
- :vartype id: str
- :ivar type: The object type, which is always 'function'. Required. Default value is "function".
- :vartype type: str
- :ivar function: The detailed information about the function called by the model. Required.
- :vartype function: ~azure.ai.client.models.RunStepFunctionToolCallDetails
- """
-
- type: Literal["function"] = rest_discriminator(name="type") # type: ignore
- """The object type, which is always 'function'. Required. Default value is \"function\"."""
- function: "_models.RunStepFunctionToolCallDetails" = rest_field()
- """The detailed information about the function called by the model. Required."""
-
- @overload
- def __init__(
- self,
- *,
- id: str, # pylint: disable=redefined-builtin
- function: "_models.RunStepFunctionToolCallDetails",
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, type="function", **kwargs)
-
-
-class RunStepFunctionToolCallDetails(_model_base.Model):
- """The detailed information about the function called by the model.
-
-
- :ivar name: The name of the function. Required.
- :vartype name: str
- :ivar arguments: The arguments that the model requires are provided to the named function.
- Required.
- :vartype arguments: str
- :ivar output: The output of the function, only populated for function calls that have already
- had their outputs submitted. Required.
- :vartype output: str
- """
-
- name: str = rest_field()
- """The name of the function. Required."""
- arguments: str = rest_field()
- """The arguments that the model requires are provided to the named function. Required."""
- output: str = rest_field()
- """The output of the function, only populated for function calls that have already had their
- outputs submitted. Required."""
-
- @overload
- def __init__(
- self,
- *,
- name: str,
- arguments: str,
- output: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
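# [Editor's note] Illustrative sketch only, not part of the generated client.
# It shows how the function tool call models above might be read back from a
# retrieved run step; the `run_step` object and its retrieval are assumed.
import json

def print_function_calls(run_step) -> None:
    # step_details is a RunStepToolCallDetails when the step invoked tools.
    for call in run_step.step_details.tool_calls:
        if call.type == "function":
            # `arguments` is a JSON-encoded string; `output` is only populated
            # once the tool outputs have been submitted back to the run.
            args = json.loads(call.function.arguments)
            print(call.function.name, args, call.function.output)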
-class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"):
- """The detailed information associated with a message creation run step.
-
-
- :ivar type: The object type, which is always 'message_creation'. Required. Represents a run
- step to create a message.
- :vartype type: str or ~azure.ai.client.models.MESSAGE_CREATION
- :ivar message_creation: Information about the message creation associated with this run step.
- Required.
- :vartype message_creation: ~azure.ai.client.models.RunStepMessageCreationReference
- """
-
- type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore
- """The object type, which is always 'message_creation'. Required. Represents a run step to create
- a message."""
- message_creation: "_models.RunStepMessageCreationReference" = rest_field()
- """Information about the message creation associated with this run step. Required."""
-
- @overload
- def __init__(
- self,
- *,
- message_creation: "_models.RunStepMessageCreationReference",
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs)
-
-
-class RunStepMessageCreationReference(_model_base.Model):
- """The details of a message created as a part of a run step.
-
-
- :ivar message_id: The ID of the message created by this run step. Required.
- :vartype message_id: str
- """
-
- message_id: str = rest_field()
- """The ID of the message created by this run step. Required."""
-
- @overload
- def __init__(
- self,
- *,
- message_id: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"):
- """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined
- tool, that represents
- executed Microsoft Fabric operations.
-
-
- :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs.
- Required.
- :vartype id: str
- :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is
- "microsoft_fabric".
- :vartype type: str
- :ivar microsoft_fabric: Reserved for future use. Required.
- :vartype microsoft_fabric: dict[str, str] - """ - - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - microsoft_fabric: Dict[str, str] = rest_field() - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - microsoft_fabric: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="microsoft_fabric", **kwargs) - - -class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): - """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, - that represents - executed SharePoint actions. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'sharepoint'. Required. Default value is - "sharepoint". - :vartype type: str - :ivar share_point: Reserved for future use. Required. - :vartype share_point: dict[str, str] - """ - - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" - share_point: Dict[str, str] = rest_field(name="sharepoint") - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - share_point: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="sharepoint", **kwargs) - - -class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): - """The detailed information associated with a run step calling tools. - - - :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that - calls tools. - :vartype type: str or ~azure.ai.client.models.TOOL_CALLS - :ivar tool_calls: A list of tool call details for this run step. Required. - :vartype tool_calls: list[~azure.ai.client.models.RunStepToolCall] - """ - - type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'tool_calls'. Required. Represents a run step that calls - tools.""" - tool_calls: List["_models.RunStepToolCall"] = rest_field() - """A list of tool call details for this run step. Required.""" - - @overload - def __init__( - self, - *, - tool_calls: List["_models.RunStepToolCall"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) - - -class SamplingStrategy(_model_base.Model): - """SamplingStrategy Definition. - - - :ivar rate: Sampling rate. Required. 
- :vartype rate: float - """ - - rate: float = rest_field() - """Sampling rate. Required.""" - - @overload - def __init__( - self, - *, - rate: float, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): - """The input definition information for a sharepoint tool as used to configure an agent. - - - :ivar type: The object type, which is always 'sharepoint'. Required. Default value is - "sharepoint". - :vartype type: str - """ - - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="sharepoint", **kwargs) - - -class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): - """The details for required tool calls that must be submitted for an agent thread run to continue. - - - :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is - "submit_tool_outputs". - :vartype type: str - :ivar submit_tool_outputs: The details describing tools that should be called to submit tool - outputs. Required. - :vartype submit_tool_outputs: ~azure.ai.client.models.SubmitToolOutputsDetails - """ - - type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'submit_tool_outputs'. Required. Default value is - \"submit_tool_outputs\".""" - submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() - """The details describing tools that should be called to submit tool outputs. Required.""" - - @overload - def __init__( - self, - *, - submit_tool_outputs: "_models.SubmitToolOutputsDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type="submit_tool_outputs", **kwargs) - - -class SubmitToolOutputsDetails(_model_base.Model): - """The details describing tools that should be called to submit tool outputs. - - - :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to - continue. Required. - :vartype tool_calls: list[~azure.ai.client.models.RequiredToolCall] - """ - - tool_calls: List["_models.RequiredToolCall"] = rest_field() - """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" - - @overload - def __init__( - self, - *, - tool_calls: List["_models.RequiredToolCall"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class SystemData(_model_base.Model): - """Metadata pertaining to creation and last modification of the resource. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - :ivar created_at: The timestamp the resource was created at. - :vartype created_at: ~datetime.datetime - :ivar created_by: The identity that created the resource. - :vartype created_by: str - :ivar created_by_type: The identity type that created the resource. - :vartype created_by_type: str - :ivar last_modified_at: The timestamp of resource last modification (UTC). - :vartype last_modified_at: ~datetime.datetime - """ - - created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") - """The timestamp the resource was created at.""" - created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) - """The identity that created the resource.""" - created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) - """The identity type that created the resource.""" - last_modified_at: Optional[datetime.datetime] = rest_field( - name="lastModifiedAt", visibility=["read"], format="rfc3339" - ) - """The timestamp of resource last modification (UTC).""" - - -class ThreadDeletionStatus(_model_base.Model): - """The status of a thread deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is - "thread.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["thread.deleted"] = rest_field() - """The object type, which is always 'thread.deleted'. Required. Default value is - \"thread.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.deleted"] = "thread.deleted" - - -class ThreadMessage(_model_base.Model): # pylint: disable=too-many-instance-attributes - """A single, existing message within an agent thread. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.message'. Required. Default value is - "thread.message". - :vartype object: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar thread_id: The ID of the thread that this message belongs to. Required. 
- :vartype thread_id: str - :ivar status: The status of the message. Required. Known values are: "in_progress", - "incomplete", and "completed". - :vartype status: str or ~azure.ai.client.models.MessageStatus - :ivar incomplete_details: On an incomplete message, details about why the message is - incomplete. Required. - :vartype incomplete_details: ~azure.ai.client.models.MessageIncompleteDetails - :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. - Required. - :vartype completed_at: ~datetime.datetime - :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as - incomplete. Required. - :vartype incomplete_at: ~datetime.datetime - :ivar role: The role associated with the agent thread message. Required. Known values are: - "user" and "assistant". - :vartype role: str or ~azure.ai.client.models.MessageRole - :ivar content: The list of content items associated with the agent thread message. Required. - :vartype content: list[~azure.ai.client.models.MessageContent] - :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. - :vartype assistant_id: str - :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. - Required. - :vartype run_id: str - :ivar attachments: A list of files attached to the message, and the tools they were added to. - Required. - :vartype attachments: list[~azure.ai.client.models.MessageAttachment] - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message"] = rest_field() - """The object type, which is always 'thread.message'. Required. Default value is - \"thread.message\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - thread_id: str = rest_field() - """The ID of the thread that this message belongs to. Required.""" - status: Union[str, "_models.MessageStatus"] = rest_field() - """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and - \"completed\".""" - incomplete_details: "_models.MessageIncompleteDetails" = rest_field() - """On an incomplete message, details about why the message is incomplete. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the message was completed. Required.""" - incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" - role: Union[str, "_models.MessageRole"] = rest_field() - """The role associated with the agent thread message. Required. Known values are: \"user\" and - \"assistant\".""" - content: List["_models.MessageContent"] = rest_field() - """The list of content items associated with the agent thread message. Required.""" - assistant_id: str = rest_field() - """If applicable, the ID of the agent that authored this message. Required.""" - run_id: str = rest_field() - """If applicable, the ID of the run associated with the authoring of this message. 
Required.""" - attachments: List["_models.MessageAttachment"] = rest_field() - """A list of files attached to the message, and the tools they were added to. Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - thread_id: str, - status: Union[str, "_models.MessageStatus"], - incomplete_details: "_models.MessageIncompleteDetails", - completed_at: datetime.datetime, - incomplete_at: datetime.datetime, - role: Union[str, "_models.MessageRole"], - content: List["_models.MessageContent"], - assistant_id: str, - run_id: str, - attachments: List["_models.MessageAttachment"], - metadata: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.message"] = "thread.message" - - -class ThreadMessageOptions(_model_base.Model): - """A single message within an agent thread, as provided during that thread's creation for its - initial state. - - All required parameters must be populated in order to send to server. - - :ivar role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: "user" and "assistant". - :vartype role: str or ~azure.ai.client.models.MessageRole - :ivar content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :vartype content: str - :ivar attachments: A list of files attached to the message, and the tools they should be added - to. - :vartype attachments: list[~azure.ai.client.models.MessageAttachment] - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. - :vartype metadata: dict[str, str] - """ - - role: Union[str, "_models.MessageRole"] = rest_field() - """The role of the entity that is creating the message. Allowed values include: - - - * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases - to represent user-generated messages. - * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: \"user\" and \"assistant\".""" - content: str = rest_field() - """The textual content of the initial message. Currently, robust input including images and - annotated text may only be provided via - a separate call to the create message API. 
Required.""" - attachments: Optional[List["_models.MessageAttachment"]] = rest_field() - """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[Dict[str, str]] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length.""" - - @overload - def __init__( - self, - *, - role: Union[str, "_models.MessageRole"], - content: str, - attachments: Optional[List["_models.MessageAttachment"]] = None, - metadata: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class ThreadRun(_model_base.Model): # pylint: disable=too-many-instance-attributes - """Data representing a single evaluation run of an agent thread. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.run'. Required. Default value is - "thread.run". - :vartype object: str - :ivar thread_id: The ID of the thread associated with this run. Required. - :vartype thread_id: str - :ivar assistant_id: The ID of the agent associated with the thread this run was performed - against. Required. - :vartype assistant_id: str - :ivar status: The status of the agent thread run. Required. Known values are: "queued", - "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and - "expired". - :vartype status: str or ~azure.ai.client.models.RunStatus - :ivar required_action: The details of the action required for the agent thread run to continue. - :vartype required_action: ~azure.ai.client.models.RequiredAction - :ivar last_error: The last error, if any, encountered by this agent thread run. Required. - :vartype last_error: ~azure.ai.client.models.RunError - :ivar model: The ID of the model to use. Required. - :vartype model: str - :ivar instructions: The overridden system instructions used for this agent thread run. - Required. - :vartype instructions: str - :ivar tools: The overridden enabled tools used for this agent thread run. Required. - :vartype tools: list[~azure.ai.client.models.ToolDefinition] - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. - Required. - :vartype expires_at: ~datetime.datetime - :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. - Required. - :vartype started_at: ~datetime.datetime - :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. - :vartype completed_at: ~datetime.datetime - :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. - Required. - :vartype cancelled_at: ~datetime.datetime - :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. 
- :vartype failed_at: ~datetime.datetime
- :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is
- not incomplete. Required. Known values are: "max_completion_tokens" and "max_prompt_tokens".
- :vartype incomplete_details: str or ~azure.ai.client.models.IncompleteRunDetails
- :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not
- in a terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). Required.
- :vartype usage: ~azure.ai.client.models.RunCompletionUsage
- :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1.
- :vartype temperature: float
- :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1.
- :vartype top_p: float
- :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over
- the course of the run. Required.
- :vartype max_prompt_tokens: int
- :ivar max_completion_tokens: The maximum number of completion tokens specified to have been
- used over the course of the run. Required.
- :vartype max_completion_tokens: int
- :ivar truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Required.
- :vartype truncation_strategy: ~azure.ai.client.models.TruncationObject
- :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is
- one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice
- :vartype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.client.models.AgentsNamedToolChoice
- :ivar response_format: The response format of the tool calls used in this run. Required. Is one
- of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat
- :vartype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode or
- ~azure.ai.client.models.AgentsApiResponseFormat
- :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
- storing additional information about that object in a structured format. Keys may be up to 64
- characters in length and values may be up to 512 characters in length. Required.
- :vartype metadata: dict[str, str]
- :ivar tool_resources: Override the tools the agent can use for this run. This is useful for
- modifying the behavior on a per-run basis.
- :vartype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions
- :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run.
- :vartype parallel_tool_calls: bool
- """
-
- id: str = rest_field()
- """The identifier, which can be referenced in API endpoints. Required."""
- object: Literal["thread.run"] = rest_field()
- """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\"."""
- thread_id: str = rest_field()
- """The ID of the thread associated with this run. Required."""
- assistant_id: str = rest_field()
- """The ID of the agent associated with the thread this run was performed against. Required."""
- status: Union[str, "_models.RunStatus"] = rest_field()
- """The status of the agent thread run. Required.
Known values are: \"queued\", \"in_progress\",
- \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\"."""
- required_action: Optional["_models.RequiredAction"] = rest_field()
- """The details of the action required for the agent thread run to continue."""
- last_error: "_models.RunError" = rest_field()
- """The last error, if any, encountered by this agent thread run. Required."""
- model: str = rest_field()
- """The ID of the model to use. Required."""
- instructions: str = rest_field()
- """The overridden system instructions used for this agent thread run. Required."""
- tools: List["_models.ToolDefinition"] = rest_field()
- """The overridden enabled tools used for this agent thread run. Required."""
- created_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this object was created. Required."""
- expires_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this item expires. Required."""
- started_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this item was started. Required."""
- completed_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this completed. Required."""
- cancelled_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this was cancelled. Required."""
- failed_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp, in seconds, representing when this failed. Required."""
- incomplete_details: Union[str, "_models.IncompleteRunDetails"] = rest_field()
- """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required.
- Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\"."""
- usage: "_models.RunCompletionUsage" = rest_field()
- """Usage statistics related to the run. This value will be ``null`` if the run is not in a
- terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required."""
- temperature: Optional[float] = rest_field()
- """The sampling temperature used for this run. If not set, defaults to 1."""
- top_p: Optional[float] = rest_field()
- """The nucleus sampling value used for this run. If not set, defaults to 1."""
- max_prompt_tokens: int = rest_field()
- """The maximum number of prompt tokens specified to have been used over the course of the run.
- Required."""
- max_completion_tokens: int = rest_field()
- """The maximum number of completion tokens specified to have been used over the course of the run.
- Required."""
- truncation_strategy: "_models.TruncationObject" = rest_field()
- """The strategy to use for dropping messages as the context window moves forward. Required."""
- tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field()
- """Controls whether or not and which tool is called by the model. Required. Is one of the
- following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"],
- AgentsNamedToolChoice"""
- response_format: "_types.AgentsApiResponseFormatOption" = rest_field()
- """The response format of the tool calls used in this run. Required.
Is one of the following
- types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat"""
- metadata: Dict[str, str] = rest_field()
- """A set of up to 16 key/value pairs that can be attached to an object, used for storing
- additional information about that object in a structured format. Keys may be up to 64
- characters in length and values may be up to 512 characters in length. Required."""
- tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field()
- """Override the tools the agent can use for this run. This is useful for modifying the behavior on
- a per-run basis."""
- parallel_tool_calls: Optional[bool] = rest_field(name="parallelToolCalls")
- """Determines if tools can be executed in parallel within the run."""
-
- @overload
- def __init__( # pylint: disable=too-many-locals
- self,
- *,
- id: str, # pylint: disable=redefined-builtin
- thread_id: str,
- assistant_id: str,
- status: Union[str, "_models.RunStatus"],
- last_error: "_models.RunError",
- model: str,
- instructions: str,
- tools: List["_models.ToolDefinition"],
- created_at: datetime.datetime,
- expires_at: datetime.datetime,
- started_at: datetime.datetime,
- completed_at: datetime.datetime,
- cancelled_at: datetime.datetime,
- failed_at: datetime.datetime,
- incomplete_details: Union[str, "_models.IncompleteRunDetails"],
- usage: "_models.RunCompletionUsage",
- max_prompt_tokens: int,
- max_completion_tokens: int,
- truncation_strategy: "_models.TruncationObject",
- tool_choice: "_types.AgentsApiToolChoiceOption",
- response_format: "_types.AgentsApiResponseFormatOption",
- metadata: Dict[str, str],
- required_action: Optional["_models.RequiredAction"] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None,
- parallel_tool_calls: Optional[bool] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
- self.object: Literal["thread.run"] = "thread.run"
-
-
-class ToolOutput(_model_base.Model):
- """The data provided during a tool outputs submission to resolve pending tool calls and allow the
- model to continue.
-
- :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a
- required action from a run.
- :vartype tool_call_id: str
- :ivar output: The output from the tool to be submitted.
- :vartype output: str
- """
-
- tool_call_id: Optional[str] = rest_field()
- """The ID of the tool call being resolved, as provided in the tool calls of a required action from
- a run."""
- output: Optional[str] = rest_field()
- """The output from the tool to be submitted."""
-
- @overload
- def __init__(
- self,
- *,
- tool_call_id: Optional[str] = None,
- output: Optional[str] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
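# [Editor's note] Illustrative sketch only: building ToolOutput values (model
# above) to answer a run in the requires_action state. The tool call ID and
# payload are placeholders, and the submit operation that would consume them
# is assumed rather than shown.
from azure.ai.client.models import ToolOutput

tool_outputs = [
    ToolOutput(tool_call_id="call_abc123", output='{"temperature_c": 22}'),
]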
-class ToolResources(_model_base.Model):
- """A set of resources that are used by the agent's tools. The resources are specific to the type
- of
- tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
- ``file_search``
- tool requires a list of vector store IDs.
-
- :ivar code_interpreter: Resources to be used by the ``code_interpreter`` tool consisting of
- file IDs.
- :vartype code_interpreter: ~azure.ai.client.models.CodeInterpreterToolResource
- :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store
- IDs.
- :vartype file_search: ~azure.ai.client.models.FileSearchToolResource
- :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of
- connection IDs.
- :vartype bing_grounding: ~azure.ai.client.models.ConnectionListResource
- :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of
- connection IDs.
- :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource
- :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection
- IDs.
- :vartype share_point: ~azure.ai.client.models.ConnectionListResource
- :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index
- IDs and names.
- :vartype azure_ai_search: ~azure.ai.client.models.AzureAISearchResource
- """
-
- code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field()
- """Resources to be used by the ``code_interpreter`` tool consisting of file IDs."""
- file_search: Optional["_models.FileSearchToolResource"] = rest_field()
- """Resources to be used by the ``file_search`` tool consisting of vector store IDs."""
- bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
- """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs."""
- microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
- """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs."""
- share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
- """Resources to be used by the ``sharepoint`` tool consisting of connection IDs."""
- azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
- """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names."""
-
- @overload
- def __init__(
- self,
- *,
- code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None,
- file_search: Optional["_models.FileSearchToolResource"] = None,
- bing_grounding: Optional["_models.ConnectionListResource"] = None,
- microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
- share_point: Optional["_models.ConnectionListResource"] = None,
- azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
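# [Editor's note] Illustrative sketch only: a minimal ToolResources (model
# above) for an agent that uses the file_search tool. "vs_abc123" is a
# placeholder vector store ID, and FileSearchToolResource is assumed to accept
# `vector_store_ids` as defined earlier in this module.
from azure.ai.client.models import FileSearchToolResource, ToolResources

resources = ToolResources(
    file_search=FileSearchToolResource(vector_store_ids=["vs_abc123"]),
)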
-class TruncationObject(_model_base.Model):
- """Controls for how a thread will be truncated prior to the run. Use this to control the initial
- context window of the run.
-
-
- :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to
- ``last_messages``\\ , the thread will
- be truncated to the ``last_messages`` count most recent messages in the thread. When set to
- ``auto``\\ , messages in the middle of the thread
- will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
- values are: "auto" and "last_messages".
- :vartype type: str or ~azure.ai.client.models.TruncationStrategy
- :ivar last_messages: The number of most recent messages from the thread when constructing the
- context for the run.
- :vartype last_messages: int
- """
-
- type: Union[str, "_models.TruncationStrategy"] = rest_field()
- """The truncation strategy to use for the thread. The default is ``auto``. If set to
- ``last_messages``\ , the thread will
- be truncated to the ``last_messages`` count most recent messages in the thread. When set to
- ``auto``\ , messages in the middle of the thread
- will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known
- values are: \"auto\" and \"last_messages\"."""
- last_messages: Optional[int] = rest_field()
- """The number of most recent messages from the thread when constructing the context for the run."""
-
- @overload
- def __init__(
- self,
- *,
- type: Union[str, "_models.TruncationStrategy"],
- last_messages: Optional[int] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class UpdateCodeInterpreterToolResourceOptions(_model_base.Model):
- """Request object to update ``code_interpreter`` tool resources.
-
- :ivar file_ids: A list of file IDs to override the current list of the agent.
- :vartype file_ids: list[str]
- """
-
- file_ids: Optional[List[str]] = rest_field()
- """A list of file IDs to override the current list of the agent."""
-
- @overload
- def __init__(
- self,
- *,
- file_ids: Optional[List[str]] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class UpdateFileSearchToolResourceOptions(_model_base.Model):
- """Request object to update ``file_search`` tool resources.
-
- :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent.
- :vartype vector_store_ids: list[str]
- """
-
- vector_store_ids: Optional[List[str]] = rest_field()
- """A list of vector store IDs to override the current list of the agent."""
-
- @overload
- def __init__(
- self,
- *,
- vector_store_ids: Optional[List[str]] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class UpdateToolResourcesOptions(_model_base.Model):
- """Request object. A set of resources that are used by the agent's tools. The resources are
- specific to the type of tool.
- For example, the ``code_interpreter`` tool requires a list of file IDs, while the
- ``file_search`` tool requires a list of
- vector store IDs.
-
- :ivar code_interpreter: Overrides the list of file IDs made available to the
- ``code_interpreter`` tool. There can be a maximum of 20 files
- associated with the tool.
- :vartype code_interpreter: ~azure.ai.client.models.UpdateCodeInterpreterToolResourceOptions
- :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of
- 1 vector store attached to the agent.
- :vartype file_search: ~azure.ai.client.models.UpdateFileSearchToolResourceOptions
- :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding``
- tool consisting of connection IDs.
- :vartype bing_grounding: ~azure.ai.client.models.ConnectionListResource
- :ivar microsoft_fabric: Overrides the list of connections to be used by the
- ``microsoft_fabric`` tool consisting of connection IDs.
- :vartype microsoft_fabric: ~azure.ai.client.models.ConnectionListResource
- :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool
- consisting of connection IDs.
- :vartype share_point: ~azure.ai.client.models.ConnectionListResource
- :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool
- consisting of index IDs and names.
- :vartype azure_ai_search: ~azure.ai.client.models.AzureAISearchResource
- """
-
- code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field()
- """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a
- maximum of 20 files
- associated with the tool."""
- file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field()
- """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store
- attached to the agent."""
- bing_grounding: Optional["_models.ConnectionListResource"] = rest_field()
- """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of
- connection IDs."""
- microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field()
- """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of
- connection IDs."""
- share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint")
- """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of
- connection IDs."""
- azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field()
- """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and
- names."""
-
- @overload
- def __init__(
- self,
- *,
- code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None,
- file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None,
- bing_grounding: Optional["_models.ConnectionListResource"] = None,
- microsoft_fabric: Optional["_models.ConnectionListResource"] = None,
- share_point: Optional["_models.ConnectionListResource"] = None,
- azure_ai_search: Optional["_models.AzureAISearchResource"] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
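# [Editor's note] Illustrative sketch only: a per-run override built from the
# models above, pointing the file_search tool at a different vector store for
# a single run. "vs_override" is a placeholder ID.
from azure.ai.client.models import (
    UpdateFileSearchToolResourceOptions,
    UpdateToolResourcesOptions,
)

tool_resource_overrides = UpdateToolResourcesOptions(
    file_search=UpdateFileSearchToolResourceOptions(vector_store_ids=["vs_override"]),
)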
-class VectorStore(_model_base.Model): # pylint: disable=too-many-instance-attributes
- """A vector store is a collection of processed files that can be used by the ``file_search`` tool.
-
- Readonly variables are only populated by the server, and will be ignored when sending a request.
-
-
- :ivar id: The identifier, which can be referenced in API endpoints. Required.
- :vartype id: str
- :ivar object: The object type, which is always ``vector_store``. Required. Default value is
- "vector_store".
- :vartype object: str
- :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created.
- Required.
- :vartype created_at: ~datetime.datetime
- :ivar name: The name of the vector store. Required.
- :vartype name: str
- :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required.
- :vartype usage_bytes: int
- :ivar file_counts: Files count grouped by status processed or being processed by this vector
- store. Required.
- :vartype file_counts: ~azure.ai.client.models.VectorStoreFileCount
- :ivar status: The status of the vector store, which can be either ``expired``\\ ,
- ``in_progress``\\ , or ``completed``. A status of ``completed`` indicates that the vector store
- is ready for use. Required. Known values are: "expired", "in_progress", and "completed".
- :vartype status: str or ~azure.ai.client.models.VectorStoreStatus
- :ivar expires_after: Details on when this vector store expires.
- :vartype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
- :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire.
- :vartype expires_at: ~datetime.datetime
- :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last
- active. Required.
- :vartype last_active_at: ~datetime.datetime
- :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
- storing additional information about that object in a structured format. Keys may be up to 64
- characters in length and values may be up to 512 characters in length. Required.
- :vartype metadata: dict[str, str]
- """
-
- id: str = rest_field()
- """The identifier, which can be referenced in API endpoints. Required."""
- object: Literal["vector_store"] = rest_field()
- """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\"."""
- created_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp (in seconds) for when the vector store was created. Required."""
- name: str = rest_field()
- """The name of the vector store. Required."""
- usage_bytes: int = rest_field()
- """The total number of bytes used by the files in the vector store. Required."""
- file_counts: "_models.VectorStoreFileCount" = rest_field()
- """Files count grouped by status processed or being processed by this vector store. Required."""
- status: Union[str, "_models.VectorStoreStatus"] = rest_field()
- """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or
- ``completed``. A status of ``completed`` indicates that the vector store is ready for use.
- Required. Known values are: \"expired\", \"in_progress\", and \"completed\"."""
- expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field()
- """Details on when this vector store expires."""
- expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp")
- """The Unix timestamp (in seconds) for when the vector store will expire."""
- last_active_at: datetime.datetime = rest_field(format="unix-timestamp")
- """The Unix timestamp (in seconds) for when the vector store was last active.
Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - name: str, - usage_bytes: int, - file_counts: "_models.VectorStoreFileCount", - status: Union[str, "_models.VectorStoreStatus"], - last_active_at: datetime.datetime, - metadata: Dict[str, str], - expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, - expires_at: Optional[datetime.datetime] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store"] = "vector_store" - - -class VectorStoreChunkingStrategyRequest(_model_base.Model): - """An abstract representation of a vector store chunking strategy configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest - - All required parameters must be populated in order to send to server. - - :ivar type: The object type. Required. Known values are: "auto" and "static". - :vartype type: str or ~azure.ai.client.models.VectorStoreChunkingStrategyRequestType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Known values are: \"auto\" and \"static\".""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): - """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and - chunk_overlap_tokens of 400. - - All required parameters must be populated in order to send to server. - - :ivar type: The object type, which is always 'auto'. Required. - :vartype type: str or ~azure.ai.client.models.AUTO - """ - - type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'auto'. Required.""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) - - -class VectorStoreChunkingStrategyResponse(_model_base.Model): - """An abstract representation of a vector store chunking strategy configuration. - - You probably want to use the sub-classes and not this class directly. 
Known sub-classes are:
- VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse
-
-
- :ivar type: The object type. Required. Known values are: "other" and "static".
- :vartype type: str or ~azure.ai.client.models.VectorStoreChunkingStrategyResponseType
- """
-
- __mapping__: Dict[str, _model_base.Model] = {}
- type: str = rest_discriminator(name="type")
- """The object type. Required. Known values are: \"other\" and \"static\"."""
-
- @overload
- def __init__(
- self,
- *,
- type: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, **kwargs)
-
-
-class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"):
- """This is returned when the chunking strategy is unknown. Typically, this is because the file was
- indexed before the chunking_strategy concept was introduced in the API.
-
-
- :ivar type: The object type, which is always 'other'. Required.
- :vartype type: str or ~azure.ai.client.models.OTHER
- """
-
- type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore
- """The object type, which is always 'other'. Required."""
-
- @overload
- def __init__(
- self,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation
- super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs)
-
-
-class VectorStoreDeletionStatus(_model_base.Model):
- """Response object for deleting a vector store.
-
- Readonly variables are only populated by the server, and will be ignored when sending a request.
-
-
- :ivar id: The ID of the resource specified for deletion. Required.
- :vartype id: str
- :ivar deleted: A value indicating whether deletion was successful. Required.
- :vartype deleted: bool
- :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value
- is "vector_store.deleted".
- :vartype object: str
- """
-
- id: str = rest_field()
- """The ID of the resource specified for deletion. Required."""
- deleted: bool = rest_field()
- """A value indicating whether deletion was successful. Required."""
- object: Literal["vector_store.deleted"] = rest_field()
- """The object type, which is always 'vector_store.deleted'. Required. Default value is
- \"vector_store.deleted\"."""
-
- @overload
- def __init__(
- self,
- *,
- id: str, # pylint: disable=redefined-builtin
- deleted: bool,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
- self.object: Literal["vector_store.deleted"] = "vector_store.deleted"
-
-
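# [Editor's note] Illustrative sketch only, for the VectorStoreExpirationPolicy
# model defined just below: expire a vector store seven days after it was last
# active.
from azure.ai.client.models import VectorStoreExpirationPolicy

expiration = VectorStoreExpirationPolicy(anchor="last_active_at", days=7)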
"last_active_at" - :vartype anchor: str or ~azure.ai.client.models.VectorStoreExpirationPolicyAnchor - :ivar days: The anchor timestamp after which the expiration policy applies. Required. - :vartype days: int - """ - - anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field() - """Anchor timestamp after which the expiration policy applies. Supported anchors: - ``last_active_at``. Required. \"last_active_at\"""" - days: int = rest_field() - """The anchor timestamp after which the expiration policy applies. Required.""" - - @overload - def __init__( - self, - *, - anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"], - days: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class VectorStoreFile(_model_base.Model): - """Description of a file attached to a vector store. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``vector_store.file``. Required. Default value - is "vector_store.file". - :vartype object: str - :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from - the original file - size. Required. - :vartype usage_bytes: int - :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. - :vartype vector_store_id: str - :ivar status: The status of the vector store file, which can be either ``in_progress``\\ , - ``completed``\\ , ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the - vector store file is ready for use. Required. Known values are: "in_progress", "completed", - "failed", and "cancelled". - :vartype status: str or ~azure.ai.client.models.VectorStoreFileStatus - :ivar last_error: The last error associated with this vector store file. Will be ``null`` if - there are no errors. Required. - :vartype last_error: ~azure.ai.client.models.VectorStoreFileError - :ivar chunking_strategy: The strategy used to chunk the file. Required. - :vartype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyResponse - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.file"] = rest_field() - """The object type, which is always ``vector_store.file``. Required. Default value is - \"vector_store.file\".""" - usage_bytes: int = rest_field() - """The total vector store usage in bytes. Note that this may be different from the original file - size. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" - vector_store_id: str = rest_field() - """The ID of the vector store that the file is attached to. Required.""" - status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , - ``cancelled``\ , or ``failed``. 
The status ``completed`` indicates that the vector store file - is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and - \"cancelled\".""" - last_error: "_models.VectorStoreFileError" = rest_field() - """The last error associated with this vector store file. Will be ``null`` if there are no errors. - Required.""" - chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() - """The strategy used to chunk the file. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - usage_bytes: int, - created_at: datetime.datetime, - vector_store_id: str, - status: Union[str, "_models.VectorStoreFileStatus"], - last_error: "_models.VectorStoreFileError", - chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.file"] = "vector_store.file" - - -class VectorStoreFileBatch(_model_base.Model): - """A batch of files attached to a vector store. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``vector_store.file_batch``. Required. Default - value is "vector_store.files_batch". - :vartype object: str - :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was - created. Required. - :vartype created_at: ~datetime.datetime - :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. - :vartype vector_store_id: str - :ivar status: The status of the vector store files batch, which can be either ``in_progress``\\ - , ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: "in_progress", - "completed", "cancelled", and "failed". - :vartype status: str or ~azure.ai.client.models.VectorStoreFileBatchStatus - :ivar file_counts: Files count grouped by status processed or being processed by this vector - store. Required. - :vartype file_counts: ~azure.ai.client.models.VectorStoreFileCount - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.files_batch"] = rest_field() - """The object type, which is always ``vector_store.file_batch``. Required. Default value is - \"vector_store.files_batch\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" - vector_store_id: str = rest_field() - """The ID of the vector store that the file is attached to. Required.""" - status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``\ , - ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", - \"completed\", \"cancelled\", and \"failed\".""" - file_counts: "_models.VectorStoreFileCount" = rest_field() - """Files count grouped by status processed or being processed by this vector store. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - vector_store_id: str, - status: Union[str, "_models.VectorStoreFileBatchStatus"], - file_counts: "_models.VectorStoreFileCount", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch" - - -class VectorStoreFileCount(_model_base.Model): - """Counts of files processed or being processed by this vector store grouped by status. - - - :ivar in_progress: The number of files that are currently being processed. Required. - :vartype in_progress: int - :ivar completed: The number of files that have been successfully processed. Required. - :vartype completed: int - :ivar failed: The number of files that have failed to process. Required. - :vartype failed: int - :ivar cancelled: The number of files that were cancelled. Required. - :vartype cancelled: int - :ivar total: The total number of files. Required. - :vartype total: int - """ - - in_progress: int = rest_field() - """The number of files that are currently being processed. Required.""" - completed: int = rest_field() - """The number of files that have been successfully processed. Required.""" - failed: int = rest_field() - """The number of files that have failed to process. Required.""" - cancelled: int = rest_field() - """The number of files that were cancelled. Required.""" - total: int = rest_field() - """The total number of files. Required.""" - - @overload - def __init__( - self, - *, - in_progress: int, - completed: int, - failed: int, - cancelled: int, - total: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class VectorStoreFileDeletionStatus(_model_base.Model): - """Response object for deleting a vector store file relationship. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value - is "vector_store.file.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["vector_store.file.deleted"] = rest_field() - """The object type, which is always 'vector_store.deleted'. Required. Default value is - \"vector_store.file.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted" - - -class VectorStoreFileError(_model_base.Model): - """Details on the error that may have ocurred while processing a file for this vector store. - - - :ivar code: One of ``server_error`` or ``rate_limit_exceeded``. Required. Known values are: - "internal_error", "file_not_found", "parsing_error", and "unhandled_mime_type". - :vartype code: str or ~azure.ai.client.models.VectorStoreFileErrorCode - :ivar message: A human-readable description of the error. Required. - :vartype message: str - """ - - code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field() - """One of ``server_error`` or ``rate_limit_exceeded``. Required. Known values are: - \"internal_error\", \"file_not_found\", \"parsing_error\", and \"unhandled_mime_type\".""" - message: str = rest_field() - """A human-readable description of the error. Required.""" - - @overload - def __init__( - self, - *, - code: Union[str, "_models.VectorStoreFileErrorCode"], - message: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): - """Options to configure a vector store static chunking strategy. - - - :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is - 800. The minimum value is 100 and the maximum value is 4096. Required. - :vartype max_chunk_size_tokens: int - :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value - is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. *. Required. - :vartype chunk_overlap_tokens: int - """ - - max_chunk_size_tokens: int = rest_field() - """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 - and the maximum value is 4096. Required.""" - chunk_overlap_tokens: int = rest_field() - """The number of tokens that overlap between chunks. The default value is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. *. Required.""" - - @overload - def __init__( - self, - *, - max_chunk_size_tokens: int, - chunk_overlap_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, **kwargs) - - -class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"): - """A statically configured chunking strategy. - - All required parameters must be populated in order to send to server. - - :ivar type: The object type, which is always 'static'. Required. - :vartype type: str or ~azure.ai.client.models.STATIC - :ivar static: The options for the static chunking strategy. Required. 
- :vartype static: ~azure.ai.client.models.VectorStoreStaticChunkingStrategyOptions - """ - - type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() - """The options for the static chunking strategy. Required.""" - - @overload - def __init__( - self, - *, - static: "_models.VectorStoreStaticChunkingStrategyOptions", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs) - - -class VectorStoreStaticChunkingStrategyResponse( - VectorStoreChunkingStrategyResponse, discriminator="static" -): # pylint: disable=name-too-long - """A statically configured chunking strategy. - - - :ivar type: The object type, which is always 'static'. Required. - :vartype type: str or ~azure.ai.client.models.STATIC - :ivar static: The options for the static chunking strategy. Required. - :vartype static: ~azure.ai.client.models.VectorStoreStaticChunkingStrategyOptions - """ - - type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() - """The options for the static chunking strategy. Required.""" - - @overload - def __init__( - self, - *, - static: "_models.VectorStoreStaticChunkingStrategyOptions", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=useless-super-delegation - super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py deleted file mode 100644 index 82bbce9de88c..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/models/_patch.py +++ /dev/null @@ -1,1015 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
- -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -import datetime -import inspect -import json -import logging -import base64 -import asyncio - -from azure.core.credentials import TokenCredential, AccessToken - -from ._enums import AgentStreamEvent, ConnectionType -from ._models import ( - ConnectionsListSecretsResponse, - MessageDeltaChunk, - SubmitToolOutputsAction, - ThreadRun, - RunStep, - ThreadMessage, - RunStepDeltaChunk, - FunctionToolDefinition, - FunctionDefinition, - ToolDefinition, - ToolResources, - FileSearchToolDefinition, - FileSearchToolResource, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - RequiredFunctionToolCall, -) - -from abc import ABC, abstractmethod -from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin - -logger = logging.getLogger(__name__) - - -def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: - """ - Remove any parameters that are not present in the class's public fields; return a shallow copy of the dictionary. - - **Note:** Classes inheriting from the model check that the parameters are present - in the list of attributes and raise an error if they are not. This check may not - be relevant for classes that do not inherit from azure.ai.client._model_base.Model. - :param model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :return: The dictionary with all invalid parameters removed. - """ - new_params = {} - valid_parameters = set( - filter( - lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() - ) - ) - for k in filter(lambda x: x in valid_parameters, parameters.keys()): - new_params[k] = parameters[k] - return new_params - - -def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: - """ - Instantiate class with the set of parameters from the server. - - :param model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. - """ - if not isinstance(parameters, dict): - return parameters - return model_class(**_filter_parameters(model_class, parameters)) - - -class ConnectionProperties: - """The properties of a single connection. - - :ivar id: A unique identifier for the connection. - :vartype id: str - :ivar name: The friendly name of the connection. - :vartype name: str - :ivar authentication_type: The authentication type used by the connection. - :vartype authentication_type: ~azure.ai.client.models._models.AuthenticationType - :ivar connection_type: The connection type. - :vartype connection_type: ~azure.ai.client.models._models.ConnectionType - :ivar endpoint_url: The endpoint URL associated with this connection. - :vartype endpoint_url: str - :ivar key: The api-key to be used when accessing the connection. - :vartype key: str - :ivar token_credential: The TokenCredential to be used when accessing the connection. 
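To make the helper's contract concrete, a small self-contained sketch: `_FakeField` and `FakeModel` below are hypothetical stand-ins for a generated model whose `rest_field()` descriptors carry a `_type` marker attribute, so a payload with an unknown key instantiates cleanly instead of raising:

```python
class _FakeField:
    _type = str  # mimics the marker attribute checked by _filter_parameters

class FakeModel:
    id = _FakeField()
    status = _FakeField()

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

payload = {"id": "run_123", "status": "queued", "brand_new_field": 42}
# "brand_new_field" is dropped because FakeModel declares no such field:
instance = _safe_instantiate(FakeModel, payload)
```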
- :vartype token_credential: ~azure.core.credentials.TokenCredential - """ - - def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: Optional[TokenCredential] = None) -> None: - self.id = connection.id - self.name = connection.name - self.authentication_type = connection.properties.auth_type - self.connection_type = connection.properties.category - self.endpoint_url = ( - connection.properties.target[:-1] - if connection.properties.target.endswith("/") - else connection.properties.target - ) - self.key: Optional[str] = None - if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): - self.key = connection.properties.credentials.key - self.token_credential = token_credential - - def to_evaluator_model_config(self, deployment_name, api_version) -> Dict[str, str]: - connection_type = self.connection_type.value - if self.connection_type.value == ConnectionType.AZURE_OPEN_AI: - connection_type = "azure_openai" - - if self.authentication_type == "ApiKey": - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": connection_type, - "api_version": api_version, - "api_key": f"{self.id}/credentials/key", - } - else: - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": self.connection_type, - "api_version": api_version, - } - return model_config - - def __str__(self): - out = "{\n" - out += f' "name": "{self.name}",\n' - out += f' "id": "{self.id}",\n' - out += f' "authentication_type": "{self.authentication_type}",\n' - out += f' "connection_type": "{self.connection_type}",\n' - out += f' "endpoint_url": "{self.endpoint_url}",\n' - if self.key: - out += f' "key": "{self.key}",\n' - else: - out += ' "key": null,\n' - if self.token_credential: - access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") - out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' - else: - out += ' "token_credential": null\n' - out += "}\n" - return out - - -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential - self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. 
Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (-len(payload) % 4) # Add padding only when necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.client import AzureAIClient - - ai_client = AzureAIClient( - credential=self._credential, - endpoint="not-needed", # Since we are only going to use the "endpoints" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = ai_client.endpoints.get(connection_name=self._connection_name, populate_secrets=True) - - self._sas_token = connection.properties.credentials.sas - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) - - def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken: - logger.debug("[SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, self._expires_on.timestamp()) - - -# Define type_map to translate Python type annotations to JSON Schema types -type_map = { - "str": "string", - "int": "integer", - "float": "number", - "bool": "boolean", - "bytes": "string", # Typically encoded as base64-encoded strings in JSON - "NoneType": "null", - "datetime": "string", # Use format "date-time" - "date": "string", # Use format "date" - "UUID": "string", # Use format "uuid" -} - - -def _map_type(annotation) -> str: - - if annotation == inspect.Parameter.empty: - return "string" # Default type if annotation is missing - - origin = get_origin(annotation) - - if origin in {list, List}: - return "array" - elif origin in {dict, Dict}: - return "object" - elif hasattr(annotation, "__name__"): - return type_map.get(annotation.__name__, "string") - elif isinstance(annotation, type): - return type_map.get(annotation.__name__, "string") - - return "string" # Fallback to "string" if type is unrecognized - - -class Tool(ABC): - """ - An abstract class representing a tool that can be used by an agent. - """ - - @property - @abstractmethod - def definitions(self) -> List[ToolDefinition]: - """Get the tool definitions.""" - pass - - @property - @abstractmethod - def resources(self) -> ToolResources: - """Get the tool resources.""" - pass - - @abstractmethod - def execute(self, tool_call: Any) -> Any: - """ - Execute the tool with the provided tool call. - - :param tool_call: The tool call to execute. - :return: The output of the tool operations. - """ - pass - - -class FunctionTool(Tool): - """ - A tool that executes user-defined functions. - """ - - def __init__(self, functions: Set[Callable[[], Any]]): - """ - Initialize FunctionTool with a set of functions. - - :param functions: A set of function objects. 
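For instance, a `FunctionTool` might be built from a plain function like the hypothetical `fetch_weather` below; the tool derives a JSON-schema-style `FunctionDefinition` from the signature via `_map_type` and takes the first docstring line as the description:

```python
# fetch_weather is a hypothetical user function, not part of the SDK.
def fetch_weather(city: str, unit: str) -> str:
    """Fetch the current weather for a city."""
    return f"Sunny in {city} (reported in {unit})."

weather_tool = FunctionTool({fetch_weather})
# One FunctionToolDefinition per registered function; "city" and "unit"
# both map to "string"-typed properties in the generated schema.
definitions = weather_tool.definitions
```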
- """ - self._functions = self._create_function_dict(functions) - self._definitions = self._build_function_definitions(functions) - - def _create_function_dict(self, funcs: Set[Callable[[], Any]]) -> Dict[str, Callable[[], Any]]: - func_dict = {func.__name__: func for func in funcs} - return func_dict - - def _build_function_definitions(self, functions: Set[Callable[[], Any]]) -> List[ToolDefinition]: - specs = [] - for func in functions: - sig = inspect.signature(func) - params = sig.parameters - docstring = inspect.getdoc(func) - description = docstring.split("\n")[0] if docstring else "No description" - - properties = {} - for param_name, param in params.items(): - param_type = _map_type(param.annotation) - param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None - properties[param_name] = {"type": param_type, "description": param_description} - - function_def = FunctionDefinition( - name=func.__name__, - description=description, - parameters={"type": "object", "properties": properties, "required": list(params.keys())}, - ) - tool_def = FunctionToolDefinition(function=function_def) - specs.append(tool_def) - return specs - - def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: - function_name = tool_call.function.name - arguments = tool_call.function.arguments - - if function_name not in self._functions: - logging.error(f"Function '{function_name}' not found.") - raise ValueError(f"Function '{function_name}' not found.") - - function = self._functions[function_name] - - try: - parsed_arguments = json.loads(arguments) - except json.JSONDecodeError as e: - logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") - raise ValueError(f"Invalid JSON arguments: {e}") from e - - if not isinstance(parsed_arguments, dict): - logging.error(f"Arguments must be a JSON object for function '{function_name}'.") - raise TypeError("Arguments must be a JSON object.") - - return function, parsed_arguments - - def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - function, parsed_arguments = self._get_func_and_args(tool_call) - - try: - return function(**parsed_arguments) if parsed_arguments else function() - except TypeError as e: - logging.error(f"Error executing function '{tool_call.function.name}': {e}") - raise - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the function definitions. - - :return: A list of function definitions. - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as FunctionTool doesn't have specific resources. - """ - return ToolResources() - - -class AsyncFunctionTool(FunctionTool): - - async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - function, parsed_arguments = self._get_func_and_args(tool_call) - - try: - if inspect.iscoroutinefunction(function): - return await function(**parsed_arguments) if parsed_arguments else await function() - else: - return function(**parsed_arguments) if parsed_arguments else function() - except TypeError as e: - logging.error(f"Error executing function '{tool_call.function.name}': {e}") - raise - - -class FileSearchTool(Tool): - """ - A tool that searches for uploaded file information from the created vector stores. 
- """ - - def __init__(self, vector_store_ids: List[str] = []): - self.vector_store_ids = vector_store_ids - - def add_vector_store(self, store_id: str): - """ - Add a vector store ID to the list of vector stores to search for files. - """ - self.vector_store_ids.append(store_id) - - def remove_vector_store(self, store_id: str): - """ - Remove a vector store ID from the list of vector stores to search for files. - """ - self.vector_store_ids.remove(store_id) - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the file search tool definitions. - """ - return [FileSearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the file search resources. - """ - return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids)) - - def execute(self, tool_call: Any) -> Any: - pass - - -class CodeInterpreterTool(Tool): - """ - A tool that interprets code files uploaded to the agent. - """ - - def __init__(self): - self.file_ids = [] - - def add_file(self, file_id: str): - """ - Add a file ID to the list of files to interpret. - - :param file_id: The ID of the file to interpret. - """ - self.file_ids.append(file_id) - - def remove_file(self, file_id: str): - """ - Remove a file ID to the list of files to interpret. - - :param file_id: The ID of the file to remove. - """ - self.file_ids.remove(file_id) - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the code interpreter tool definitions. - """ - return [CodeInterpreterToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the code interpreter resources. - """ - return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids)) - - def execute(self, tool_call: Any) -> Any: - pass - - -class ToolSet: - """ - A collection of tools that can be used by an agent. - """ - - def __init__(self): - self._tools: List[Tool] = [] - - def validate_tool_type(self, tool_type: Type[Tool]) -> None: - """ - Validate the type of the tool. - - :param tool_type: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool_type, AsyncFunctionTool): - raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.client.aio." - ) - - def add(self, tool: Tool): - """ - Add a tool to the tool set. - - :param tool: The tool to add. - :raises ValueError: If a tool of the same type already exists. - """ - self.validate_tool_type(type(tool)) - - if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): - raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") - self._tools.append(tool) - - def remove(self, tool_type: Type[Tool]) -> None: - """ - Remove a tool of the specified type from the tool set. - - :param tool_type: The type of tool to remove. - :raises ValueError: If a tool of the specified type is not found. - """ - for i, tool in enumerate(self._tools): - if isinstance(tool, tool_type): - del self._tools[i] - logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.") - return - raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the definitions for all tools in the tool set. 
- """ - tools = [] - for tool in self._tools: - tools.extend(tool.definitions) - return tools - - @property - def resources(self) -> ToolResources: - """ - Get the resources for all tools in the tool set. - """ - tool_resources = {} - for tool in self._tools: - resources = tool.resources - for key, value in resources.items(): - if key in tool_resources: - if isinstance(tool_resources[key], dict) and isinstance(value, dict): - tool_resources[key].update(value) - else: - tool_resources[key] = value - return self._create_tool_resources_from_dict(tool_resources) - - def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: - """ - Safely converts a dictionary into a ToolResources instance. - """ - try: - return ToolResources(**resources) - except TypeError as e: - logging.error(f"Error creating ToolResources: {e}") - raise ValueError("Invalid resources for ToolResources.") from e - - def get_definitions_and_resources(self) -> Dict[str, Any]: - """ - Get the definitions and resources for all tools in the tool set. - - :return: A dictionary containing the tool resources and definitions. - """ - return { - "tool_resources": self.resources, - "tools": self.definitions, - } - - def get_tool(self, tool_type: Type[Tool]) -> Tool: - """ - Get a tool of the specified type from the tool set. - - :param tool_type: The type of tool to get. - :return: The tool of the specified type. - :raises ValueError: If a tool of the specified type is not found. - """ - for tool in self._tools: - if isinstance(tool, tool_type): - return tool - raise ValueError(f"Tool of type {tool_type.__name__} not found.") - - def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(FunctionTool) - output = tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") - - return tool_outputs - - -class AsyncToolSet(ToolSet): - - def validate_tool_type(self, tool_type: Type[Tool]) -> None: - """ - Validate the type of the tool. - - :param tool_type: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool_type, FunctionTool): - raise ValueError( - "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." - ) - - async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. 
- """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(AsyncFunctionTool) - output = await tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") - - return tool_outputs - - -class AgentEventHandler: - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" - pass - - def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" - pass - - def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" - pass - - def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" - pass - - def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" - pass - - def on_error(self, data: str) -> None: - """Handle error events.""" - pass - - def on_done(self) -> None: - """Handle the completion of the stream.""" - pass - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" - pass - - -class AsyncAgentEventHandler: - - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" - pass - - async def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" - pass - - async def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" - pass - - async def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" - pass - - async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" - pass - - async def on_error(self, data: str) -> None: - """Handle error events.""" - pass - - async def on_done(self) -> None: - """Handle the completion of the stream.""" - pass - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" - pass - - -class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): - def __init__( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], - event_handler: Optional["AsyncAgentEventHandler"] = None, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.done = False - self.buffer = "" - self.submit_tool_outputs = submit_tool_outputs - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - result = close_method() - if asyncio.iscoroutine(result): - await result - - def __aiter__(self): - return self - - async def __anext__(self) -> Tuple[str, Any]: - while True: - try: - chunk = await self.response_iterator.__anext__() - self.buffer += chunk.decode("utf-8") - except StopAsyncIteration: - if self.buffer: - event_data_str, self.buffer = self.buffer, "" - if event_data_str: - return await self._process_event(event_data_str) - raise StopAsyncIteration - - while "\n\n" in self.buffer: - event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return await self._process_event(event_data_str) - - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: - event_lines = event_data_str.strip().split("\n") - event_type 
= None - event_data = "" - - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data = json.loads(event_data) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, - }: - event_data_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, - }: - event_data_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, - }: - event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_data_obj = parsed_data - - return event_type, event_data_obj - - async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = self._parse_event_data(event_data_str) - - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - await self.submit_tool_outputs(event_data_obj, self.event_handler) - if self.event_handler: - try: - if isinstance(event_data_obj, MessageDeltaChunk): - await self.event_handler.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - await self.event_handler.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - await self.event_handler.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - await self.event_handler.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - await self.event_handler.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - await self.event_handler.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - await self.event_handler.on_done() - self.done = True # Mark the stream as done - else: - await self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") - - return event_type, event_data_obj - - async def until_done(self) -> 
None: - """ - Iterates through all events until the stream is marked as done. - """ - try: - async for _ in self: - pass # The EventHandler handles the events - except StopAsyncIteration: - pass - - -class AgentRunStream(Iterator[Tuple[str, Any]]): - def __init__( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, Optional[AgentEventHandler]], None], - event_handler: Optional[AgentEventHandler] = None, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.done = False - self.buffer = "" - self.submit_tool_outputs = submit_tool_outputs - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - close_method() - - def __iter__(self): - return self - - def __next__(self) -> Tuple[str, Any]: - if self.done: - raise StopIteration - while True: - try: - chunk = next(self.response_iterator) - self.buffer += chunk.decode("utf-8") - except StopIteration: - if self.buffer: - event_data_str, self.buffer = self.buffer, "" - if event_data_str: - return self._process_event(event_data_str) - raise StopIteration - - while "\n\n" in self.buffer: - event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return self._process_event(event_data_str) - - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: - event_lines = event_data_str.strip().split("\n") - event_type = None - event_data = "" - - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data = json.loads(event_data) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, - }: - event_data_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, - }: - event_data_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, - }: - event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_data_obj 
= parsed_data - - return event_type, event_data_obj - - def _process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = self._parse_event_data(event_data_str) - - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - self.submit_tool_outputs(event_data_obj, self.event_handler) - if self.event_handler: - try: - if isinstance(event_data_obj, MessageDeltaChunk): - self.event_handler.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - self.event_handler.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - self.event_handler.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - self.event_handler.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - self.event_handler.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - self.event_handler.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - self.event_handler.on_done() - self.done = True # Mark the stream as done - else: - self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") - - return event_type, event_data_obj - - def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - """ - try: - for _ in self: - pass # The EventHandler handles the events - except StopIteration: - pass - - -__all__: List[str] = [ - "AgentEventHandler", - "AgentRunStream", - "AsyncAgentEventHandler", - "AsyncAgentRunStream", - "AsyncFunctionTool", - "AsyncToolSet", - "CodeInterpreterTool", - "FileSearchTool", - "FunctionTool", - "SASTokenCredential", - "Tool", - "ToolSet", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py deleted file mode 100644 index 56224bae24a5..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
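Both `AgentRunStream` and `AsyncAgentRunStream` above implement the same server-sent-events framing: events are separated by a blank line, and each event carries `event:` and `data:` fields. A standalone sketch of that framing over a hypothetical captured body:

```python
# raw is a hypothetical captured SSE body, shown for illustration only.
raw = b'event: thread.message.delta\ndata: {"id": "msg_123"}\n\n'

buffer = raw.decode("utf-8")
while "\n\n" in buffer:
    event_block, buffer = buffer.split("\n\n", 1)
    event_type, event_data = None, ""
    for line in event_block.strip().split("\n"):
        if line.startswith("event:"):
            event_type = line.split(":", 1)[1].strip()
        elif line.startswith("data:"):
            event_data = line.split(":", 1)[1].strip()
    print(event_type, event_data)  # -> thread.message.delta {"id": "msg_123"}
```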
-# -------------------------------------------------------------------------- - -from ._operations import AgentsOperations -from ._operations import ConnectionsOperations -from ._operations import EvaluationsOperations - -from ._patch import __all__ as _patch_all -from ._patch import * # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AgentsOperations", - "ConnectionsOperations", - "EvaluationsOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py deleted file mode 100644 index a604b3ea24f9..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_operations.py +++ /dev/null @@ -1,7392 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import json -import sys -from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload -import urllib.parse - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - StreamClosedError, - StreamConsumedError, - map_error, -) -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import _model_base, models as _models -from .._model_base import SdkJSONEncoder, _deserialize -from .._serialization import Serializer -from .._vendor import FileType, prepare_multipart_form_data - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore - -if TYPE_CHECKING: - from .. 
import _types -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_agents_request( - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: 
Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_thread_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def 
build_agents_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_messages_request( - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", 
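
Note how the optional filters map snake_case Python keywords onto the service's camelCase query names (`run_id` becomes `runId`). A builder-level illustration:

```python
from azure.ai.client.operations._operations import build_agents_list_messages_request

# Only messages produced by one run, newest first, two at a time.
request = build_agents_list_messages_request(
    "thread_abc", run_id="run_xyz", limit=2, order="desc"
)
print(request.url)
# /threads/thread_abc/messages?api-version=2024-07-01-preview&runId=run_xyz&limit=2&order=desc
```
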
_params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if run_id is not None: - _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages/{messageId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "messageId": _SERIALIZER.url("message_id", message_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages/{messageId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "messageId": _SERIALIZER.url("message_id", message_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_run_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_runs_request( - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - 
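
Runs execute asynchronously on the service side, so callers typically create one and poll until it reaches a terminal state. A hedged polling sketch: it assumes the generated `AgentsOperations` exposes `get_run(thread_id, run_id)` mirroring `build_agents_get_run_request` above, and that the run model carries an OpenAI-style `status` field; neither is shown verbatim in this hunk.

```python
import time

# Assumed terminal status values, following the OpenAI-compatible surface.
TERMINAL_STATUSES = {"completed", "failed", "cancelled", "expired"}

def wait_for_run(client, thread_id: str, run_id: str, interval: float = 1.0):
    """Poll a run until it leaves the queued/in_progress states."""
    while True:
        run = client.agents.get_run(thread_id, run_id)  # assumed method name
        if run.status in TERMINAL_STATUSES:             # assumed field
            return run
        time.sleep(interval)
```
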
accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long - thread_id: str, run_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/cancel" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/runs" - - # Construct parameters - _params["api-version"] = 
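
When a run pauses in `requires_action`, tool results go back through the `submit_tool_outputs` route. Because the builders forward unknown kwargs straight into `HttpRequest`, a JSON body can be attached via `content`; the body shape below follows the OpenAI-compatible wire format and is an assumption, not something defined in this hunk:

```python
import json

from azure.ai.client.operations._operations import (
    build_agents_submit_tool_outputs_to_run_request,
)

# Assumed wire format for tool outputs.
body = json.dumps({"tool_outputs": [{"tool_call_id": "call_1", "output": "42"}]})
request = build_agents_submit_tool_outputs_to_run_request(
    "thread_abc", "run_xyz", content_type="application/json", content=body
)
print(request.method, request.url)
# POST /threads/thread_abc/runs/run_xyz/submit_tool_outputs?api-version=2024-07-01-preview
```
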
_SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_run_step_request(thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - "stepId": _SERIALIZER.url("step_id", step_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_run_steps_request( - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/steps" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_files_request( - *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if purpose is not None: - _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") - - # 
Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_upload_file_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_file_content_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}/content" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_stores_request( - *, - limit: Optional[int] = 
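
The file routes split metadata (`/files/{fileId}`) from payload (`/files/{fileId}/content`), and listing supports a `purpose` filter. A builder-level sketch; the `"assistants"` purpose value is assumed from the OpenAI-compatible surface, since the `FilePurpose` enum body is not part of this hunk:

```python
from azure.ai.client.operations._operations import (
    build_agents_get_file_content_request,
    build_agents_list_files_request,
)

print(build_agents_get_file_content_request("file_123").url)
# /files/file_123/content?api-version=2024-07-01-preview

print(build_agents_list_files_request(purpose="assistants").url)  # assumed enum value
# /files?api-version=2024-07-01-preview&purpose=assistants
```
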
None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_modify_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - 
"vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_store_files_request( # pylint: disable=name-too-long - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if filter is not None: - _params["filter"] = _SERIALIZER.query("filter", filter, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, file_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files/{fileId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, file_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files/{fileId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", 
vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, batch_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, batch_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": 
_SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if filter is not None: - _params["filter"] = _SERIALIZER.query("filter", filter, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_list_request( - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if category is not None: - _params["category"] = _SERIALIZER.query("category", category, "str") - if include_all is not None: - _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool") - if target is not None: - _params["target"] = _SERIALIZER.query("target", target, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections/{connectionName}" - path_format_arguments = { - "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections/{connectionName}/listsecrets" - path_format_arguments = { - "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), - 
} - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("apiVersion", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs:run" - - # Construct parameters - _params["apiVersion"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_list_request( - *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not None: - _params["skip"] = _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = 
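
One detail worth noticing above: `build_evaluations_create_request` sends its version as a camelCase `apiVersion` query parameter and posts to the `/evaluations/runs:run` action route, unlike the hyphenated `api-version` used by every other builder in this file; whether that is a spec quirk or a bug cannot be decided from this diff alone. The list route pages with `top`/`skip`/`maxpagesize` rather than cursors:

```python
from azure.ai.client.operations._operations import build_evaluations_list_request

request = build_evaluations_list_request(top=10, skip=20, maxpagesize=10)
print(request.url)
# /evaluations/runs?api-version=2024-07-01-preview&top=10&skip=20&maxpagesize=10
```
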
kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_get_schedule_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_create_or_replace_schedule_request( # pylint: disable=name-too-long - id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_list_schedule_request( - *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not None: - _params["skip"] 
= _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_delete_schedule_request(id: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -class AgentsOperations: # pylint: disable=too-many-public-methods - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.AzureAIClient`'s - :attr:`agents` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. 
- :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None.
- :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - if model is _Unset: - raise TypeError("missing required argument: model") - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_agent_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_agents( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. 
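
End to end, the implementation above assembles the JSON body from the flattened keywords (raising `TypeError` when `model` is missing), stamps the four workspace path parameters onto the URL, and deserializes the 200 response into `Agent`. A hedged usage sketch: the `AzureAIClient` constructor arguments are inferred from the `path_format_arguments` (endpoint, subscription, resource group, project) plus a credential, and `agent.id` is an assumed `Agent` field, since neither `_client.py` nor the model bodies appear in this hunk:

```python
from azure.ai.client import AzureAIClient
from azure.identity import DefaultAzureCredential

client = AzureAIClient(                            # assumed constructor signature
    endpoint="https://<region>.api.azureml.ms",    # placeholder
    subscription_id="<subscription-id>",
    resource_group_name="<resource-group>",
    project_name="<project>",
    credential=DefaultAzureCredential(),
)

# Flattened-keyword overload; a JSON dict or an IO[bytes] body works too.
agent = client.agents.create_agent(
    model="gpt-4o",
    name="my-agent",
    instructions="You are a helpful agent.",
    temperature=0.2,
)
print(agent.id)  # assumed field
```
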
- :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfAgent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) - - _request = build_agents_list_agents_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - _request = build_agents_get_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
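create_agent, update_agent, and the other mutating operations in this file all expose the same three overloads: a JSON mapping body, pure keyword arguments, or a raw IO[bytes] payload. A sketch of the mapping and stream forms against update_agent, reusing the hypothetical `client` from the earlier sketch:

import io
import json

# JSON mapping form: the dict is serialized with SdkJSONEncoder and sent as
# application/json.
agent = client.agents.update_agent(
    assistant_id=agent.id,
    body={"name": "renamed-agent"},
)

# IO[bytes] form: the stream is passed through unchanged as the request
# content, so the caller is responsible for a well-formed JSON payload.
payload = io.BytesIO(json.dumps({"name": "renamed-again"}).encode("utf-8"))
agent = client.agents.update_agent(
    assistant_id=agent.id,
    body=payload,
    content_type="application/json",
)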
- :paramtype content_type: str
- :keyword model: The ID of the model to use. Default value is None.
- :paramtype model: str
- :keyword name: The modified name for the agent to use. Default value is None.
- :paramtype name: str
- :keyword description: The modified description for the agent to use. Default value is None.
- :paramtype description: str
- :keyword instructions: The modified system instructions for the agent to use. Default value
- is None.
- :paramtype instructions: str
- :keyword tools: The modified collection of tools to enable for the agent. Default value is
- None.
- :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
- :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
- are specific to the type of tool. For example,
- the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
- requires a list of vector store IDs. Default value is None.
- :paramtype tool_resources: ~azure.ai.client.models.ToolResources
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output more random,
- while lower values like 0.2 will make it more focused and deterministic. Default value is
- None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass.
- So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword response_format: The response format of the tool calls used by this agent. Is one of
- the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
- or ~azure.ai.client.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: Agent. The Agent is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.Agent
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def update_agent(
- self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.Agent:
- """Modifies an existing agent.
-
- :param assistant_id: The ID of the agent to modify. Required.
- :type assistant_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: Agent. The Agent is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.Agent
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def update_agent(
- self,
- assistant_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- model: Optional[str] = None,
- name: Optional[str] = None,
- description: Optional[str] = None,
- instructions: Optional[str] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- tool_resources: Optional[_models.ToolResources] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- **kwargs: Any
- ) -> _models.Agent:
- """Modifies an existing agent.
-
- :param assistant_id: The ID of the agent to modify. Required.
- :type assistant_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword model: The ID of the model to use. Default value is None.
- :paramtype model: str
- :keyword name: The modified name for the agent to use. Default value is None.
- :paramtype name: str
- :keyword description: The modified description for the agent to use. Default value is None.
- :paramtype description: str
- :keyword instructions: The modified system instructions for the agent to use. Default value
- is None.
- :paramtype instructions: str
- :keyword tools: The modified collection of tools to enable for the agent. Default value is
- None.
- :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
- :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
- are specific to the type of tool. For example,
- the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool
- requires a list of vector store IDs. Default value is None.
- :paramtype tool_resources: ~azure.ai.client.models.ToolResources
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output more random,
- while lower values like 0.2 will make it more focused and deterministic. Default value is
- None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass.
- So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword response_format: The response format of the tool calls used by this agent. Is one of
- the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
- or ~azure.ai.client.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_agent_request( - assistant_id=assistant_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. 
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread( - self, - *, - content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_thread( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.client.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: - """Gets information about an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - _request = build_agents_get_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_thread( - self, - thread_id: str, - *, - content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. 
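A sketch combining create_thread and get_thread as documented above, again using the hypothetical `client`; the `.id` attribute on AgentThread is an assumption based on the model being MutableMapping-compatible with an OpenAI-style shape.

# Create an empty thread, optionally seeding metadata (up to 16 key/value pairs).
thread = client.agents.create_thread(metadata={"purpose": "docs-example"})

# Fetch it back by ID; a missing thread surfaces as ResourceNotFoundError
# per the 404 entry in the operation's error map.
same_thread = client.agents.get_thread(thread_id=thread.id)
assert same_thread.id == thread.id  # `.id` attribute assumed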
- :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_thread( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_thread( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.client.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_thread_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: - """Deletes an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: ThreadDeletionStatus. 
The ThreadDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_message( - self, - thread_id: str, - *, - role: Union[str, _models.MessageRole], - content: str, - content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.client.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. - :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_message( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_message( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.client.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. 
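For create_message, `role` and `content` are the only required keywords, and the keyword form validates them client-side before any request is built. A sketch with the hypothetical `client`:

# Keyword form of create_message; omitting role or content raises
# TypeError("missing required argument: ...") before any request is sent.
message = client.agents.create_message(
    thread_id=thread.id,
    role="user",  # "user" or "assistant"
    content="Summarize the attached notes in two sentences.",
    metadata={"source": "sample"},
)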
- :paramtype attachments: list[~azure.ai.client.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - if role is _Unset: - raise TypeError("missing required argument: role") - if content is _Unset: - raise TypeError("missing required argument: content") - body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_message_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_messages( - self, - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadMessage: - """Gets a list of messages that exist on a 
thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword run_id: Filter messages by the run ID that generated them. Default value is None. - :paramtype run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible - with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_list_messages_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: 
ignore - - @distributed_trace - def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: - """Gets an existing message from an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_get_message_request( - thread_id=thread_id, - message_id=message_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. 
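A pagination sketch for list_messages. The cursor semantics (`after`/`before`) come from the docstring above; the `data` and `has_more` fields on the pageable model are assumptions based on its OpenAI-style name, since the model definition lives elsewhere.

# First page, newest messages first.
page = client.agents.list_messages(thread_id=thread.id, limit=20, order="desc")
for msg in page.data:  # `data` field assumed
    print(msg.id, msg.role)

# Page forward by passing the last object ID of the previous page as the cursor.
if page.has_more:  # `has_more` field assumed
    next_page = client.agents.list_messages(
        thread_id=thread.id,
        limit=20,
        after=page.data[-1].id,
    )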
- :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_message( - self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_message( - self, - thread_id: str, - message_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_message_request( - thread_id=thread_id, - message_id=message_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which one. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which one. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_run_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_runs( - self, - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadRun: - """Gets a list of runs for a specified thread. - - :param thread_id: Identifier of the thread. Required. 
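# --- Illustrative usage sketch: create_run --------------------------------------
# The keyword form of create_run defined above; only assistant_id is required,
# and the remaining keywords override the agent's defaults for this run. Reuses
# the assumed `client` from the update_message sketch.
run = client.agents.create_run(
    thread_id="<thread-id>",
    assistant_id="<assistant-id>",
    temperature=0.2,            # lower temperature: more focused, deterministic output
    max_prompt_tokens=4000,     # best-effort cap across the run's turns
    metadata={"source": "sample"},
)
print(run.id, run.status)
# --------------------------------------------------------------------------------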
- :type thread_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_list_runs_request( - thread_id=thread_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Gets an existing run from an existing thread. - - :param thread_id: Identifier of the thread. Required. 
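# --- Illustrative usage sketch: cursor pagination with list_runs ----------------
# Walks all runs using the `after` cursor described above. The pageable field
# names (data, has_more, last_id) follow the OpenAI pageable shape and are an
# assumption about OpenAIPageableListOfThreadRun; reuses the assumed `client`.
page = client.agents.list_runs(thread_id="<thread-id>", limit=20, order="desc")
while True:
    for run in page.data:
        print(run.id, run.status)
    if not page.has_more:
        break
    page = client.agents.list_runs(
        thread_id="<thread-id>", limit=20, order="desc", after=page.last_id
    )
# --------------------------------------------------------------------------------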
- :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_get_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. 
- :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if tool_outputs is _Unset: - raise TypeError("missing required argument: tool_outputs") - body = {"stream": stream_parameter, "tool_outputs": tool_outputs} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_submit_tool_outputs_to_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Cancels a run of an in progress thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. 
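# --- Illustrative usage sketch: answering a tool call ---------------------------
# The typical loop around submit_tool_outputs_to_run: poll the run and, when it
# pauses with status 'requires_action' and required_action.type
# 'submit_tool_outputs' (per the docstring above), answer each tool call by id.
# The required_action field shape follows the OpenAI run shape and is an
# assumption here; `my_tool_dispatch` is a placeholder for your own function,
# and the plain dicts stand in for models.ToolOutput, which is dict-compatible.
import time

def my_tool_dispatch(tool_call) -> str:
    return "42"  # placeholder: execute the requested function, return its result

run = client.agents.get_run(thread_id="<thread-id>", run_id="<run-id>")
while run.status in ("queued", "in_progress", "requires_action"):
    if run.status == "requires_action":
        outputs = [
            {"tool_call_id": call.id, "output": my_tool_dispatch(call)}
            for call in run.required_action.submit_tool_outputs.tool_calls
        ]
        run = client.agents.submit_tool_outputs_to_run(
            thread_id="<thread-id>", run_id=run.id, tool_outputs=outputs
        )
    else:
        time.sleep(1)
        run = client.agents.get_run(thread_id="<thread-id>", run_id=run.id)
# --------------------------------------------------------------------------------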
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_cancel_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, - *, - assistant_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which one. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_thread_and_run( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.client.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which one. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "thread": thread, - "tool_choice": tool_choice, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_and_run_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: - """Gets a single run step from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param step_id: Identifier of the run step. Required. - :type step_id: str - :return: RunStep. 
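# --- Illustrative usage sketch: create_thread_and_run ---------------------------
# One-shot convenience defined above: with no `thread` argument an empty thread
# is created automatically (per the docstring). Reuses the assumed `client`.
run = client.agents.create_thread_and_run(assistant_id="<assistant-id>")
print(run.thread_id, run.status)  # thread_id on ThreadRun assumed from the run shape
# --------------------------------------------------------------------------------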
The RunStep is compatible with MutableMapping - :rtype: ~azure.ai.client.models.RunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - - _request = build_agents_get_run_step_request( - thread_id=thread_id, - run_id=run_id, - step_id=step_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.RunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_run_steps( - self, - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfRunStep: - """Gets a list of run steps from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfRunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - - _request = build_agents_list_run_steps_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_files( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: - """Gets a list of previously uploaded files. - - :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is - None. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :return: FileListResponse. 
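# --- Illustrative usage sketch: tracing a run with list_run_steps ---------------
# Lists a finished run's steps oldest-first to see what the agent did at each
# stage. The `data` field name is assumed from the OpenAI pageable shape; reuses
# the assumed `client`.
steps = client.agents.list_run_steps(
    thread_id="<thread-id>", run_id="<run-id>", order="asc", limit=100
)
for step in steps.data:
    print(step.id, step.type, step.status)
# --------------------------------------------------------------------------------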
The FileListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models.FileListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - - _request = build_agents_list_files_request( - purpose=purpose, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileListResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.client._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.OpenAIFile
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace
-    def upload_file(
-        self,
-        body: JSON = _Unset,
-        *,
-        file: FileType = _Unset,
-        purpose: Union[str, _models.FilePurpose] = _Unset,
-        filename: Optional[str] = None,
-        **kwargs: Any
-    ) -> _models.OpenAIFile:
-        """Uploads a file for use by other operations.
-
-        :param body: Is one of the following types: JSON. Required.
-        :type body: JSON
-        :keyword file: The file data, in bytes. Required.
-        :paramtype file: ~azure.ai.client._vendor.FileType
-        :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and
-         Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and
-         ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results",
-         "assistants", "assistants_output", "batch", "batch_output", and "vision". Required.
-        :paramtype purpose: str or ~azure.ai.client.models.FilePurpose
-        :keyword filename: The name of the file. Default value is None.
-        :paramtype filename: str
-        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.OpenAIFile
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map: MutableMapping = {
-            401: ClientAuthenticationError,
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            304: ResourceNotModifiedError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
-
-        if body is _Unset:
-            if file is _Unset:
-                raise TypeError("missing required argument: file")
-            if purpose is _Unset:
-                raise TypeError("missing required argument: purpose")
-            body = {"file": file, "filename": filename, "purpose": purpose}
-            body = {k: v for k, v in body.items() if v is not None}
-        _body = body.as_dict() if isinstance(body, _model_base.Model) else body
-        _file_fields: List[str] = ["file"]
-        _data_fields: List[str] = ["purpose", "filename"]
-        _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields)
-
-        _request = build_agents_upload_file_request(
-            api_version=self._config.api_version,
-            files=_files,
-            data=_data,
-            headers=_headers,
-            params=_params,
-        )
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-        _request.url = self._client.format_url(_request.url, **path_format_arguments)
-
-        _stream = kwargs.pop("stream", False)
-        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
-            _request, stream=_stream, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            if _stream:
-                try:
-                    response.read()  # Load the body in memory and close the socket
-                except (StreamConsumedError, StreamClosedError):
-                    pass
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise HttpResponseError(response=response)
-
-        if _stream:
-            deserialized = response.iter_bytes()
-        else:
-            deserialized = _deserialize(_models.OpenAIFile, response.json())
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})  # type: ignore
-
-        return deserialized  # type: ignore
-
-    @distributed_trace
-    def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus:
-        """Delete a previously uploaded file.
-
-        :param file_id: The ID of the file to delete. Required.
-        :type file_id: str
-        :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.FileDeletionStatus
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map: MutableMapping = {
-            401: ClientAuthenticationError,
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            304: ResourceNotModifiedError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None)
-
-        _request = build_agents_delete_file_request(
-            file_id=file_id,
-            api_version=self._config.api_version,
-            headers=_headers,
-            params=_params,
-        )
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-        _request.url = self._client.format_url(_request.url, **path_format_arguments)
-
-        _stream = kwargs.pop("stream", False)
-        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
-            _request, stream=_stream, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            if _stream:
-                try:
-                    response.read()  # Load the body in memory and close the socket
-                except (StreamConsumedError, StreamClosedError):
-                    pass
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise HttpResponseError(response=response)
-
-        if _stream:
-            deserialized = response.iter_bytes()
-        else:
-            deserialized = _deserialize(_models.FileDeletionStatus, response.json())
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})  # type: ignore
-
-        return deserialized  # type: ignore
-
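# [Editor's sketch, not part of the patch.] A minimal round trip for the file
# operations above: upload with the required purpose, then delete. Reuses the
# hypothetical `client` from the earlier sketch; passing an open binary file for
# the FileType parameter and the OpenAI-style `id`/`deleted` fields are
# assumptions.
with open("data.jsonl", "rb") as f:
    uploaded = client.agents.upload_file(file=f, purpose="assistants", filename="data.jsonl")
status = client.agents.delete_file(uploaded.id)
print(uploaded.id, status.deleted)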
-    @distributed_trace
-    def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile:
-        """Returns information about a specific file. Does not retrieve file content.
-
-        :param file_id: The ID of the file to retrieve. Required.
-        :type file_id: str
-        :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.OpenAIFile
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map: MutableMapping = {
-            401: ClientAuthenticationError,
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            304: ResourceNotModifiedError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None)
-
-        _request = build_agents_get_file_request(
-            file_id=file_id,
-            api_version=self._config.api_version,
-            headers=_headers,
-            params=_params,
-        )
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-        _request.url = self._client.format_url(_request.url, **path_format_arguments)
-
-        _stream = kwargs.pop("stream", False)
-        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
-            _request, stream=_stream, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            if _stream:
-                try:
-                    response.read()  # Load the body in memory and close the socket
-                except (StreamConsumedError, StreamClosedError):
-                    pass
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise HttpResponseError(response=response)
-
-        if _stream:
-            deserialized = response.iter_bytes()
-        else:
-            deserialized = _deserialize(_models.OpenAIFile, response.json())
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})  # type: ignore
-
-        return deserialized  # type: ignore
-
-    @distributed_trace
-    def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse:
-        """Retrieves the raw content of a specific file.
-
-        :param file_id: The ID of the file to retrieve. Required.
-        :type file_id: str
-        :return: FileContentResponse. The FileContentResponse is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.FileContentResponse
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map: MutableMapping = {
-            401: ClientAuthenticationError,
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            304: ResourceNotModifiedError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None)
-
-        _request = build_agents_get_file_content_request(
-            file_id=file_id,
-            api_version=self._config.api_version,
-            headers=_headers,
-            params=_params,
-        )
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-        _request.url = self._client.format_url(_request.url, **path_format_arguments)
-
-        _stream = kwargs.pop("stream", False)
-        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
-            _request, stream=_stream, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            if _stream:
-                try:
-                    response.read()  # Load the body in memory and close the socket
-                except (StreamConsumedError, StreamClosedError):
-                    pass
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise HttpResponseError(response=response)
-
-        if _stream:
-            deserialized = response.iter_bytes()
-        else:
-            deserialized = _deserialize(_models.FileContentResponse, response.json())
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})  # type: ignore
-
-        return deserialized  # type: ignore
-
-    @distributed_trace
-    def list_vector_stores(
-        self,
-        *,
-        limit: Optional[int] = None,
-        order: Optional[Union[str, _models.ListSortOrder]] = None,
-        after: Optional[str] = None,
-        before: Optional[str] = None,
-        **kwargs: Any
-    ) -> _models.OpenAIPageableListOfVectorStore:
-        """Returns a list of vector stores.
-
-        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
-         100, and the default is 20. Default value is None.
-        :paramtype limit: int
-        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
-         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
-        :paramtype order: str or ~azure.ai.client.models.ListSortOrder
-        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
-         in the list. For instance, if you make a list request and receive 100 objects, ending with
-         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
-         list. Default value is None.
-        :paramtype after: str
-        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
-         in the list. For instance, if you make a list request and receive 100 objects, ending with
-         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
-         the list. Default value is None.
-        :paramtype before: str
-        :return: OpenAIPageableListOfVectorStore.
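# [Editor's sketch, not part of the patch.] The two file-retrieval operations
# defined above are distinct: get_file returns metadata only, while
# get_file_content returns the stored bytes. Reuses the hypothetical `client`;
# the `content` and `filename` field names are assumptions based on the
# OpenAI-style models.
info = client.agents.get_file("<file-id>")
payload = client.agents.get_file_content("<file-id>")
with open(info.filename, "wb") as out:
    out.write(payload.content)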
The OpenAIPageableListOfVectorStore is compatible - with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_stores_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. 
Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: - """Returns the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStore. 
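# [Editor's sketch, not part of the patch.] create_vector_store as documented
# above: every keyword is optional, and chunking_strategy only applies when
# file_ids is non-empty. Reuses the hypothetical `client` from the first sketch.
store = client.agents.create_vector_store(
    name="docs-store",
    file_ids=["<file-id>"],
    metadata={"project": "demo"},  # up to 16 pairs; keys <= 64 chars, values <= 512 chars
)
print(store.id, store.status)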
The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        error_map: MutableMapping = {
-            401: ClientAuthenticationError,
-            404: ResourceNotFoundError,
-            409: ResourceExistsError,
-            304: ResourceNotModifiedError,
-        }
-        error_map.update(kwargs.pop("error_map", {}) or {})
-
-        _headers = kwargs.pop("headers", {}) or {}
-        _params = kwargs.pop("params", {}) or {}
-
-        cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None)
-
-        _request = build_agents_get_vector_store_request(
-            vector_store_id=vector_store_id,
-            api_version=self._config.api_version,
-            headers=_headers,
-            params=_params,
-        )
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-        _request.url = self._client.format_url(_request.url, **path_format_arguments)
-
-        _stream = kwargs.pop("stream", False)
-        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
-            _request, stream=_stream, **kwargs
-        )
-
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            if _stream:
-                try:
-                    response.read()  # Load the body in memory and close the socket
-                except (StreamConsumedError, StreamClosedError):
-                    pass
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise HttpResponseError(response=response)
-
-        if _stream:
-            deserialized = response.iter_bytes()
-        else:
-            deserialized = _deserialize(_models.VectorStore, response.json())
-
-        if cls:
-            return cls(pipeline_response, deserialized, {})  # type: ignore
-
-        return deserialized  # type: ignore
-
-    @overload
-    def modify_vector_store(
-        self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.VectorStore:
-        """Modifies an existing vector store.
-
-        :param vector_store_id: Identifier of the vector store to modify. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def modify_vector_store(
-        self,
-        vector_store_id: str,
-        *,
-        content_type: str = "application/json",
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        **kwargs: Any
-    ) -> _models.VectorStore:
-        """Modifies an existing vector store.
-
-        :param vector_store_id: Identifier of the vector store to modify. Required.
-        :type vector_store_id: str
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def modify_vector_store(
-        self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
-    ) -> _models.VectorStore:
-        """Modifies an existing vector store.
-
-        :param vector_store_id: Identifier of the vector store to modify. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace
-    def modify_vector_store(
-        self,
-        vector_store_id: str,
-        body: Union[JSON, IO[bytes]] = _Unset,
-        *,
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        **kwargs: Any
-    ) -> _models.VectorStore:
-        """Modifies an existing vector store.
-
-        :param vector_store_id: Identifier of the vector store to modify. Required.
-        :type vector_store_id: str
-        :param body: Is either a JSON type or a IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :return: VectorStore.
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"expires_after": expires_after, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_modify_vector_store_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: - """Deletes the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStoreDeletionStatus. 
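# [Editor's sketch, not part of the patch.] Modify-then-delete flow for the two
# operations above. VectorStoreExpirationPolicy comes from the models namespace
# named in the docstrings; its constructor arguments here are an assumption
# based on the OpenAI expires_after shape (anchor + days).
from azure.ai.client.models import VectorStoreExpirationPolicy

client.agents.modify_vector_store(
    "<vector-store-id>",
    name="docs-store-v2",
    expires_after=VectorStoreExpirationPolicy(anchor="last_active_at", days=7),
)
deletion = client.agents.delete_vector_store("<vector-store-id>")
print(deletion.deleted)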
The VectorStoreDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_vector_store_files( - self, - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_files_request( - vector_store_id=vector_store_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
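# [Editor's sketch, not part of the patch.] Listing the files attached to a
# vector store, restricted by the status filter documented above. Reuses the
# hypothetical `client` and the OpenAI-style `data` list field.
files = client.agents.list_vector_store_files("<vector-store-id>", filter="completed", order="desc")
for vs_file in files.data:
    print(vs_file.id, vs_file.status)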
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file( - self, - vector_store_id: str, - *, - file_id: str, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_id: str = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: - """Retrieves a vector store file. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFile. 
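# [Editor's sketch, not part of the patch.] Attaching a previously uploaded file
# to a vector store, then reading it back with get_vector_store_file. Reuses the
# hypothetical `client`; omitting chunking_strategy selects the auto strategy,
# per the docstring above.
vs_file = client.agents.create_vector_store_file("<vector-store-id>", file_id="<file-id>")
same_file = client.agents.get_vector_store_file("<vector-store-id>", vs_file.id)
print(same_file.status)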
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_vector_store_file( - self, vector_store_id: str, file_id: str, **kwargs: Any - ) -> _models.VectorStoreFileDeletionStatus: - """Delete a vector store file. This will remove the file from the vector store but the file itself - will not be deleted. - To delete the file, use the delete file endpoint. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch( - self, - vector_store_id: str, - *, - file_ids: List[str], - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
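# [Editor's sketch, not part of the patch.] As the delete_vector_store_file
# docstring above notes, that operation only detaches the file from the vector
# store; the uploaded file survives until delete_file is called. Reuses the
# hypothetical `client`.
client.agents.delete_vector_store_file("<vector-store-id>", "<file-id>")  # detach only
client.agents.delete_file("<file-id>")  # actually delete the uploaded file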
- :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file_batch( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: List[str] = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_batch_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Retrieve a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
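# [Editor's sketch, not part of the patch.] Batch flow for the operations in
# this region: create a file batch, poll it with get_vector_store_file_batch,
# and cancel it if it takes too long. Reuses the hypothetical `client`; the
# status strings mirror the VectorStoreFileStatusFilter values in the
# docstrings.
import time

batch = client.agents.create_vector_store_file_batch("<vector-store-id>", file_ids=["<file-id>"])
for _ in range(30):
    batch = client.agents.get_vector_store_file_batch("<vector-store-id>", batch.id)
    if batch.status in ("completed", "failed", "cancelled"):
        break
    time.sleep(2)
else:
    # still running after ~60s: ask the service to stop processing
    batch = client.agents.cancel_vector_store_file_batch("<vector-store-id>", batch.id)
print(batch.status)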
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def cancel_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch - as soon as possible. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_cancel_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_vector_store_file_batch_files( - self, - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files in a batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.client.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.client.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_file_batch_files_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.AzureAIClient`'s - :attr:`connections` attribute. 
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def _list( # pylint: disable=protected-access - self, - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any - ) -> _models._models.ConnectionsListResponse: - """List the details of all the connections (not including their credentials). - - :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. - :paramtype category: str or ~azure.ai.client.models.ConnectionType - :keyword include_all: Indicates whether to list datastores. Service default: do not list - datastores. Default value is None. - :paramtype include_all: bool - :keyword target: Target of the workspace connection. Default value is None. - :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - - _request = build_connections_list_request( - category=category, - include_all=include_all, - target=target, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def _get( # pylint: disable=protected-access - self, connection_name: str, 
**kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, without credentials. - - :param connection_name: Connection Name. Required. - :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - _request = build_connections_get_request( - connection_name=connection_name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - - @distributed_trace - def _list_secrets( # pylint: disable=protected-access - self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credentials (if available). - - :param connection_name: Connection Name. Required. 
- :type connection_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. - :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.client.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - if body is _Unset: - if ignored is _Unset: - raise TypeError("missing required argument: ignored") - body = {"ignored": ignored} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_connections_list_secrets_request( - connection_name=connection_name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class EvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.client.AzureAIClient`'s - :attr:`evaluations` attribute. 
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: ~azure.ai.client.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. 
- - :param evaluation: Evaluation to run. Required. - :type evaluation: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type evaluation: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluation, (IOBase, bytes)): - _content = evaluation - else: - _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - 
deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> Iterable["_models.Evaluation"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @overload - def update( - self, - id: str, - resource: _models.Evaluation, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.client.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update( - self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type resource: ~azure.ai.client.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. 
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_update_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_schedule(self, id: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - def create_or_replace_schedule( - self, id: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.client.models.EvaluationSchedule - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_replace_schedule( - self, id: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_replace_schedule( - self, id: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_replace_schedule( - self, id: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: EvaluationSchedule, - JSON, IO[bytes] Required. - :type resource: ~azure.ai.client.models.EvaluationSchedule or JSON or IO[bytes] - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.client.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_or_replace_schedule_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_schedule( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> Iterable["_models.EvaluationSchedule"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of EvaluationSchedule - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.client.models.EvaluationSchedule] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = 
_deserialize(List[_models.EvaluationSchedule], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @distributed_trace - def delete_schedule(self, id: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Resource delete operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_evaluations_delete_schedule_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py b/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py deleted file mode 100644 index b1ffba2950c4..000000000000 --- a/sdk/ai/azure-ai-client/azure/ai/client/operations/_patch.py +++ /dev/null @@ -1,1982 +0,0 @@ -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
-
-Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
-"""
-import sys, io, logging, os, time
-from io import IOBase
-from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast
-
-# from zoneinfo import ZoneInfo
-from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
-from ._operations import AgentsOperations as AgentsOperationsGenerated
-from ..models._enums import AuthenticationType, ConnectionType
-from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse
-from .._types import AgentsApiResponseFormatOption
-from ..models._patch import ConnectionProperties
-from ..models._enums import FilePurpose
-from .._vendor import FileType
-from .. import models as _models
-
-from azure.core.tracing.decorator import distributed_trace
-
-if sys.version_info >= (3, 9):
-    from collections.abc import MutableMapping
-else:
-    from typing import MutableMapping  # type: ignore  # pylint: disable=ungrouped-imports
-
-if TYPE_CHECKING:
-    # pylint: disable=unused-import,ungrouped-imports
-    from .. import _types
-
-JSON = MutableMapping[str, Any]  # pylint: disable=unsubscriptable-object
-_Unset: Any = object()
-
-logger = logging.getLogger(__name__)
-
-
-class InferenceOperations:
-
-    def __init__(self, outer_instance):
-        self.outer_instance = outer_instance
-
-    @distributed_trace
-    def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
-        """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Chat Completions AI model deployment.
-        The package `azure-ai-inference` must be installed prior to calling this method.
-
-        :return: An authenticated chat completions client
-        :rtype: ~azure.ai.inference.ChatCompletionsClient
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
-
-        try:
-            from azure.ai.inference import ChatCompletionsClient
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError(
-                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
-            )
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
-            )
-            from azure.core.credentials import AzureKeyCredential
-
-            client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
-            )
-        elif connection.authentication_type == AuthenticationType.AAD:
-            # MaaS models do not yet support EntraID auth
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
-            )
-            client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            # TODO - Not yet supported by the service. Expected 9/27.
-            logger.debug(
-                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
-            )
-            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
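A short usage sketch may help here. This is not part of the patch: it assumes the `AzureAIClient` constructor takes the credential, endpoint, subscription, resource group, and project name (as the client configuration suggests) and that this operations class is exposed as the client's `inference` attribute, per its `outer_instance` wiring. All endpoint and ID strings are placeholders.

    from azure.ai.client import AzureAIClient
    from azure.ai.inference.models import UserMessage
    from azure.identity import DefaultAzureCredential

    # Placeholder project coordinates; substitute your own.
    ai_client = AzureAIClient(
        credential=DefaultAzureCredential(),
        endpoint="https://<region>.api.azureml.ms",
        subscription_id="<subscription-id>",
        resource_group_name="<resource-group>",
        project_name="<project-name>",
    )

    # The returned ChatCompletionsClient is already authenticated against the
    # default Serverless connection's Chat Completions deployment.
    chat_client = ai_client.inference.get_chat_completions_client()
    response = chat_client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
    print(response.choices[0].message.content)

The point of the helper is that the caller never handles connection credentials directly; the right credential type is chosen from the connection's authentication type.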
-
-    @distributed_trace
-    def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
-        """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
-        The package `azure-ai-inference` must be installed prior to calling this method.
-
-        :return: An authenticated embeddings client
-        :rtype: ~azure.ai.inference.EmbeddingsClient
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
-
-        try:
-            from azure.ai.inference import EmbeddingsClient
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError(
-                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
-            )
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
-            )
-            from azure.core.credentials import AzureKeyCredential
-
-            client = EmbeddingsClient(
-                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
-            )
-        elif connection.authentication_type == AuthenticationType.AAD:
-            # MaaS models do not yet support EntraID auth
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
-            )
-            client = EmbeddingsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            # TODO - Not yet supported by the service. Expected 9/27.
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
-            )
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
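The embeddings variant mirrors the chat sketch above. Reusing the hypothetical `ai_client` from that sketch:

    # EmbeddingsClient.embed accepts a list of input strings and returns one
    # embedding vector per input.
    embeddings_client = ai_client.inference.get_embeddings_client()
    result = embeddings_client.embed(input=["first phrase", "second phrase"])
    for item in result.data:
        print(f"input index {item.index}: {len(item.embedding)} dimensions")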
-
-    @distributed_trace
-    def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI":
-        """Get an authenticated AzureOpenAI client (from the `openai` package) for the default
-        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
-
-        :return: An authenticated AzureOpenAI client
-        :rtype: ~openai.AzureOpenAI
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No Azure OpenAI connection found")
-
-        try:
-            from openai import AzureOpenAI
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'")
-
-        # Pick latest GA version from the "Data plane - Inference" row in the table
-        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
-        AZURE_OPENAI_API_VERSION = "2024-06-01"
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
-            )
-            client = AzureOpenAI(
-                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
-            )
-        elif connection.authentication_type == AuthenticationType.AAD:
-            logger.debug(
-                "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
-            )
-            try:
-                from azure.identity import get_bearer_token_provider
-            except ModuleNotFoundError as _:
-                raise ModuleNotFoundError(
-                    "azure.identity package not installed. Please install it using 'pip install azure.identity'"
-                )
-            client = AzureOpenAI(
-                # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
-                azure_ad_token_provider=get_bearer_token_provider(
-                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
-                ),
-                azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
-            # Imported here as well so this branch does not depend on the AAD branch having run.
-            from azure.identity import get_bearer_token_provider
-
-            client = AzureOpenAI(
-                azure_ad_token_provider=get_bearer_token_provider(
-                    connection.token_credential, "https://cognitiveservices.azure.com/.default"
-                ),
-                azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
-            )
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
-
-
-class ConnectionsOperations(ConnectionsOperationsGenerated):
-
-    @distributed_trace
-    def get_default(
-        self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
-    ) -> ConnectionProperties:
-        """Get the properties of the default connection of a certain connection type, with or without
-        populating authentication credentials.
-
-        :keyword connection_type: The connection type. Required.
-        :paramtype connection_type: ~azure.ai.client.models.ConnectionType
-        :keyword with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
-        :paramtype with_credentials: bool
-        :return: The default connection properties, or None if no connection of the given type exists.
-        :rtype: ~azure.ai.client.models._models.ConnectionProperties
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        if not connection_type:
-            raise ValueError("You must specify a connection type")
-        # Since there is no notion of default connection at the moment, list all connections in the category
-        # and return the first one
-        connection_properties_list = self.list(connection_type=connection_type, **kwargs)
-        if len(connection_properties_list) > 0:
-            if with_credentials:
-                return self.get(
-                    connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
-                )
-            else:
-                return connection_properties_list[0]
-        else:
-            return None
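Since the returned client is a plain `openai.AzureOpenAI` instance, the usual OpenAI SDK surface applies. A sketch, again reusing the hypothetical `ai_client` from the earlier example (the deployment name is a placeholder):

    aoai_client = ai_client.inference.get_azure_openai_client()
    completion = aoai_client.chat.completions.create(
        model="<azure-openai-deployment-name>",
        messages=[{"role": "user", "content": "How many feet are in a mile?"}],
    )
    print(completion.choices[0].message.content)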
- @distributed_trace
- def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
- """Get the properties of a single connection, given its connection name, with or without
- populating authentication credentials.
-
- :param connection_name: Connection Name. Required.
- :type connection_name: str
- :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
- :type with_credentials: bool
- :return: The connection properties
- :rtype: ~azure.ai.client.models._models.ConnectionProperties
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- if not connection_name:
- raise ValueError("Connection name cannot be empty")
- if with_credentials:
- connection: ConnectionsListSecretsResponse = self._list_secrets(
- connection_name=connection_name, ignored="ignore", **kwargs
- )
- if connection.properties.auth_type == AuthenticationType.AAD:
- return ConnectionProperties(connection=connection, token_credential=self._config.credential)
- elif connection.properties.auth_type == AuthenticationType.SAS:
- from ..models._patch import SASTokenCredential
-
- token_credential = SASTokenCredential(
- sas_token=connection.properties.credentials.sas,
- credential=self._config.credential,
- subscription_id=self._config.subscription_id,
- resource_group_name=self._config.resource_group_name,
- project_name=self._config.project_name,
- connection_name=connection_name,
- )
- return ConnectionProperties(connection=connection, token_credential=token_credential)
-
- return ConnectionProperties(connection=connection)
- else:
- return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs))
-
- @distributed_trace
- def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) -> Iterable[ConnectionProperties]:
- """List the properties of all connections, or all connections of a certain connection type.
-
- :param connection_type: The connection type. Optional. If provided, this method lists connections of this type.
- If not provided, all connections are listed.
- :type connection_type: ~azure.ai.client.models._models.ConnectionType
- :return: A list of connection properties
- :rtype: Iterable[~azure.ai.client.models._models.ConnectionProperties]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs)
-
- # Iterate to create the simplified result properties
- connection_properties_list: List[ConnectionProperties] = []
- for connection in connections_list.value:
- connection_properties_list.append(ConnectionProperties(connection=connection))
-
- return connection_properties_list
-
-
-class AgentsOperations(AgentsOperationsGenerated):
- @overload
- def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
- """Creates a new agent.
-
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.client.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.client.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.client.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. - :return: An Agent object. - :raises: HttpResponseError for HTTP errors. - """ - if body is not _Unset: - if isinstance(body, IOBase): - return super().create_agent(body=body, content_type=content_type, **kwargs) - return super().create_agent(body=body, **kwargs) - - if toolset is not None: - self._toolset = toolset - tools = toolset.definitions - tool_resources = toolset.resources - - return super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def get_toolset(self) -> Optional[_models.ToolSet]: - """ - Get the toolset for the agent. - - :return: The toolset for the agent. If not set, returns None. - :rtype: ~azure.ai.client.models.ToolSet - """ - if hasattr(self, "_toolset"): - return self._toolset - return None - - @overload - def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. 
The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. 
Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # If streaming is enabled, return the custom stream object - return response - - @distributed_trace - def create_and_process_run( - self, - thread_id: str, - assistant_id: str, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. 
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or
- ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.client.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or
- ~azure.ai.client.models.AgentsApiResponseFormatMode or
- ~azure.ai.client.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
- Default value is 1.
- :paramtype sleep_interval: int
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- # Create and initiate the run with additional parameters
- run = self.create_run(
- thread_id=thread_id,
- assistant_id=assistant_id,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=tools,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- metadata=metadata,
- **kwargs,
- )
-
- # Monitor and process the run status
- while run.status in ["queued", "in_progress", "requires_action"]:
- time.sleep(sleep_interval)
- run = self.get_run(thread_id=thread_id, run_id=run.id)
-
- if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logger.warning("No tool calls provided - cancelling run")
- self.cancel_run(thread_id=thread_id, run_id=run.id)
- break
-
- toolset = self.get_toolset()
- if toolset:
- tool_outputs = toolset.execute_tool_calls(tool_calls)
- else:
- raise ValueError("Toolset is not available in the client.")
-
- logger.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
-
- logger.info("Current run status: %s", run.status)
-
- return run
-
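A usage sketch contrasting the polling helper above with the streaming method defined next (illustrative; `client`, `thread` and `agent` are placeholders created earlier, and MyEventHandler stands for an application-defined AgentEventHandler subclass):

    # Blocking helper: polls get_run and executes function tools until the run completes.
    run = client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
    print(run.status)

    # Streaming variant: events are dispatched to the handler until "data: [DONE]".
    with client.agents.create_stream(
        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
    ) as stream:
        stream.until_done()
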
- @overload
- def create_stream(
- self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- content_type: str = "application/json",
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.client.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.client.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode
- or ~azure.ai.client.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def create_stream(
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- assistant_id: str = _Unset,
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.client.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.client.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.client.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.client.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.client.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.client.models.AgentsApiResponseFormatMode - or ~azure.ai.client.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. 
- :rtype: ~azure.ai.client.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.client.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_run(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def submit_tool_outputs_to_run(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run.
- :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
- :param kwargs: Additional parameters.
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.client.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # This method does not stream; return the updated ThreadRun
- return response
-
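A sketch of the manual flow this method supports, mirroring the loop inside create_and_process_run above (illustrative; `client`, `thread` and `agent` are placeholders, and build_tool_outputs is a hypothetical application helper that turns run.required_action into ToolOutput objects):

    import time

    run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
    while run.status in ["queued", "in_progress", "requires_action"]:
        time.sleep(1)
        run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
        if run.status == "requires_action":
            tool_outputs = build_tool_outputs(run.required_action)  # hypothetical helper
            run = client.agents.submit_tool_outputs_to_run(
                thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
            )
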
- @overload
- def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- *,
- tool_outputs: List[_models.ToolOutput],
- content_type: str = "application/json",
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.client.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run.
- :paramtype event_handler: ~azure.ai.client.models.AgentEventHandler
- :param kwargs: Additional parameters.
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.client.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # Cast the response to Iterator[bytes] for type correctness
- response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
-
- return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
-
- def _handle_submit_tool_outputs(
- self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None
- ) -> None:
- if isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logger.debug("No tool calls to execute.")
- return
-
- toolset = self.get_toolset()
- if toolset:
- tool_outputs = toolset.execute_tool_calls(tool_calls)
- else:
- logger.warning("Toolset is not available in the client.")
- return
-
- logger.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- with self.submit_tool_outputs_to_stream(
- thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
- ) as stream:
- stream.until_done()
-
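A usage sketch for the upload_file overloads that follow (illustrative; the path is a placeholder and "assistants" is one of the documented purpose values):

    uploaded_file = client.agents.upload_file(file_path="./data/notes.md", purpose="assistants")
    print(uploaded_file.id)
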
- :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - return super().upload_file(body=body, **kwargs) - - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - if file is not None and purpose is not None: - return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # Determine filename and create correct FileType - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return super().upload_file(file=file_content, purpose=purpose, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. 
- :paramtype file: ~azure.ai.client._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file_and_poll( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.client.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.client.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
- ) - - while uploaded_file.status in ["uploaded", "pending", "running"]: - time.sleep(sleep_interval) - uploaded_file = self.get_file(uploaded_file.id) - - return uploaded_file - - @overload - def create_vector_store_and_poll( - self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_and_poll( - self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. 
- :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_and_poll( - self, - body: Union[JSON, IO[bytes], None] = None, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStore: - """Creates a vector store and poll. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.client.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value - is 1. - :paramtype sleep_interval: float - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.client.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if body is not None: - vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs) - elif file_ids is not None or (name is not None and expires_after is not None): - vector_store = self.create_vector_store( - content_type=content_type, - file_ids=file_ids, - name=name, - expires_after=expires_after, - chunking_strategy=chunking_strategy, - metadata=metadata, - **kwargs, - ) - else: - raise ValueError( - "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', " - "'file_ids', or 'name' and 'expires_after'." - ) - - while vector_store.status == "in_progress": - time.sleep(sleep_interval) - vector_store = self.get_vector_store(vector_store.id) - - return vector_store - - @overload - def create_vector_store_file_batch_and_poll( - self, - vector_store_id: str, - body: JSON, - *, - content_type: str = "application/json", - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch and poll. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        *,
-        file_ids: List[str],
-        content_type: str = "application/json",
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :keyword file_ids: List of file identifiers. Required.
-        :paramtype file_ids: list[str]
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: IO[bytes],
-        *,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace
-    def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: Union[JSON, IO[bytes], None] = None,
-        *,
-        file_ids: List[str] = _Unset,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Create a vector store file batch and poll.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword file_ids: List of file identifiers. Required.
-        :paramtype file_ids: list[str]
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.client.models.VectorStoreChunkingStrategyRequest
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.client.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is None:
-            vector_store_file_batch = super().create_vector_store_file_batch(
-                vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
-            )
-        else:
-            content_type = kwargs.get("content_type", "application/json")
-            vector_store_file_batch = super().create_vector_store_file_batch(
-                body=body, content_type=content_type, **kwargs
-            )
-
-        while vector_store_file_batch.status == "in_progress":
-            time.sleep(sleep_interval)
-            vector_store_file_batch = super().get_vector_store_file_batch(
-                vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
-            )
-
-        return vector_store_file_batch
-
-
-__all__: List[str] = [
-    "AgentsOperations",
-    "ConnectionsOperations",
-    "InferenceOperations",
-]  # Add all objects you want publicly available to users at this package level
-
-
-def patch_sdk():
-    """Do not remove from this file.
-
-    `patch_sdk` is a last resort escape hatch that allows you to do customizations
-    you can't accomplish using the techniques described in
-    https://aka.ms/azsdk/python/dpcodegen/python/customize
-    """
diff --git a/sdk/ai/azure-ai-client/azure/ai/client/py.typed b/sdk/ai/azure-ai-client/azure/ai/client/py.typed
deleted file mode 100644
index e5aff4f83af8..000000000000
--- a/sdk/ai/azure-ai-client/azure/ai/client/py.typed
+++ /dev/null
@@ -1 +0,0 @@
-# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/dev_requirements.txt b/sdk/ai/azure-ai-client/dev_requirements.txt
deleted file mode 100644
index c82827bb56f4..000000000000
--- a/sdk/ai/azure-ai-client/dev_requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
--e ../../../tools/azure-sdk-tools
-../../core/azure-core
-../../identity/azure-identity
-aiohttp
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/requirements.txt b/sdk/ai/azure-ai-client/requirements.txt
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
deleted file mode 100644
index 5824588df32e..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_basics_async.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_basics_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use basic agent operations from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_basics_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.client.aio import AzureAIClient
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-async def main():
-
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
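-    # A minimal sketch (illustration only, not part of the original sample): fail fast
-    # with a clear error when the connection string is missing, rather than a raw KeyError.
-    if "PROJECT_CONNECTION_STRING" not in os.environ:
-        raise RuntimeError("Set the PROJECT_CONNECTION_STRING environment variable before running this sample.")
-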
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with ai_client:
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="Hello, tell me a joke"
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-
-        # poll the run as long as run status is queued or in progress
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            # wait for a second (asyncio.sleep, so the event loop is not blocked)
-            await asyncio.sleep(1)
-            run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
-            print(f"Run status: {run.status}")
-
-        print(f"Run completed with status: {run.status}")
-
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
deleted file mode 100644
index da0cd590bc2b..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_functions_async.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_functions_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with custom functions from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_functions_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
-from azure.identity import DefaultAzureCredential
-
-import os
-
-from user_async_functions import user_async_functions
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with ai_client:
-        # Initialize assistant functions
-        functions = AsyncFunctionTool(functions=user_async_functions)
-
-        # Create agent
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview",
-            name="my-assistant",
-            instructions="You are a helpful assistant",
-            tools=functions.definitions,
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        # Create thread for communication
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, ID: {thread.id}")
-
-        # Create and send message
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="Hello, what's the time?"
-        )
-        print(f"Created message, ID: {message.id}")
-
-        # Create and run assistant task
-        run = await ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        print(f"Created run, ID: {run.id}")
-
-        # Polling loop for run status
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            await asyncio.sleep(4)
-            run = await ai_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
-            if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
-                tool_calls = run.required_action.submit_tool_outputs.tool_calls
-                if not tool_calls:
-                    print("No tool calls provided - cancelling run")
-                    await ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
-                    break
-
-                tool_outputs = []
-                for tool_call in tool_calls:
-                    if isinstance(tool_call, RequiredFunctionToolCall):
-                        try:
-                            output = await functions.execute(tool_call)
-                            tool_outputs.append(
-                                {
-                                    "tool_call_id": tool_call.id,
-                                    "output": output,
-                                }
-                            )
-                        except Exception as e:
-                            print(f"Error executing tool_call {tool_call.id}: {e}")
-
-                print(f"Tool outputs: {tool_outputs}")
-                if tool_outputs:
-                    await ai_client.agents.submit_tool_outputs_to_run(
-                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
-                    )
-
-            print(f"Current run status: {run.status}")
-
-        print(f"Run completed with status: {run.status}")
-
-        # Delete the agent when done
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        # Fetch and log all messages
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
deleted file mode 100644
index 6d7a61fbf551..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_eventhandler_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with an event handler in streaming from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_stream_eventhandler_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-from typing import Any
-
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
-from azure.ai.client.models import AsyncAgentEventHandler
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-class MyEventHandler(AsyncAgentEventHandler):
-    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
-        for content_part in delta.delta.content:
-            if isinstance(content_part, MessageDeltaTextContent):
-                text_value = content_part.text.value if content_part.text else "No text"
-                print(f"Text delta received: {text_value}")
-
-    async def on_thread_message(self, message: "ThreadMessage") -> None:
-        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
-
-    async def on_thread_run(self, run: "ThreadRun") -> None:
-        print(f"ThreadRun status: {run.status}")
-
-    async def on_run_step(self, step: "RunStep") -> None:
-        print(f"RunStep type: {step.type}, Status: {step.status}")
-
-    async def on_error(self, data: str) -> None:
-        print(f"An error occurred. Data: {data}")
-
-    async def on_done(self) -> None:
-        print("Stream completed.")
-
-    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
-        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with ai_client:
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="Hello, tell me a joke"
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        async with await ai_client.agents.create_stream(
-            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
-        ) as stream:
-            await stream.until_done()
-
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
deleted file mode 100644
index df6457829769..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_eventhandler_with_toolset_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with an event handler and toolset from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_stream_eventhandler_with_toolset_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-from typing import Any
-
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
-from azure.ai.client.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet
-from azure.identity import DefaultAzureCredential
-
-import os
-
-from user_async_functions import user_async_functions
-
-
-class MyEventHandler(AsyncAgentEventHandler):
-
-    async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
-        for content_part in delta.delta.content:
-            if isinstance(content_part, MessageDeltaTextContent):
-                text_value = content_part.text.value if content_part.text else "No text"
-                print(f"Text delta received: {text_value}")
-
-    async def on_thread_message(self, message: "ThreadMessage") -> None:
-        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
-
-    async def on_thread_run(self, run: "ThreadRun") -> None:
-        print(f"ThreadRun status: {run.status}")
-
-        if run.status == "failed":
-            print(f"Run failed. Error: {run.last_error}")
-
-    async def on_run_step(self, step: "RunStep") -> None:
-        print(f"RunStep type: {step.type}, Status: {step.status}")
-
-    async def on_error(self, data: str) -> None:
-        print(f"An error occurred. Data: {data}")
-
-    async def on_done(self) -> None:
-        print("Stream completed.")
-
-    async def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
-        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    # Initialize toolset with user functions
-    functions = AsyncFunctionTool(user_async_functions)
-    toolset = AsyncToolSet()
-    toolset.add(functions)
-
-    async with ai_client:
-
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id,
-            role="user",
-            content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        async with await ai_client.agents.create_stream(
-            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
-        ) as stream:
-            await stream.until_done()
-
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py
deleted file mode 100644
index 378ab820cab4..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_stream_iteration_async.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_iteration_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with iteration in streaming from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_stream_iteration_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import AgentStreamEvent
-from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with ai_client:
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="Hello, tell me a joke"
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        async with await ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
-            async for event_type, event_data in stream:
-
-                if isinstance(event_data, MessageDeltaChunk):
-                    for content_part in event_data.delta.content:
-                        if isinstance(content_part, MessageDeltaTextContent):
-                            text_value = content_part.text.value if content_part.text else "No text"
-                            print(f"Text delta received: {text_value}")
-
-                elif isinstance(event_data, ThreadMessage):
-                    print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
-
-                elif isinstance(event_data, ThreadRun):
-                    print(f"ThreadRun status: {event_data.status}")
-
-                elif isinstance(event_data, RunStep):
-                    print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
-
-                elif event_type == AgentStreamEvent.ERROR:
-                    print(f"An error occurred. Data: {event_data}")
-
-                elif event_type == AgentStreamEvent.DONE:
-                    print("Stream completed.")
-                    break
-
-                else:
-                    print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
deleted file mode 100644
index 63906d1e26e2..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_vector_store_batch_file_search_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_vector_store_batch_file_search_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import asyncio
-import os
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import FileSearchTool, FilePurpose
-from azure.identity import DefaultAzureCredential
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
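-    # For reference, the *_and_poll convenience methods used below are roughly
-    # equivalent to this manual loop (illustrative sketch, simplified from the
-    # operations shown earlier in this patch):
-    #
-    #     batch = await ai_client.agents.create_vector_store_file_batch(
-    #         vector_store_id=vector_store.id, file_ids=[file.id]
-    #     )
-    #     while batch.status == "in_progress":
-    #         await asyncio.sleep(1)
-    #         batch = await ai_client.agents.get_vector_store_file_batch(
-    #             vector_store_id=vector_store.id, batch_id=batch.id
-    #         )
-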
-    # At the moment, it should be in the format ";;;"
-    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-    ai_client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with ai_client:
-
-        # upload a file and wait for it to be processed
-        file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
-        print(f"Uploaded file, file ID: {file.id}")
-
-        # create a vector store with no files and wait for it to be processed
-        vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
-        print(f"Created vector store, vector store ID: {vector_store.id}")
-
-        # add the file to the vector store; alternatively, file IDs can be supplied when the vector store is created
-        vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll(
-            vector_store_id=vector_store.id, file_ids=[file.id]
-        )
-        print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
-
-        # create a file search tool
-        file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
-
-        # note that both the FileSearchTool definitions and its tool_resources must be added, or the assistant will be unable to search the file
-        agent = await ai_client.agents.create_agent(
-            model="gpt-4-1106-preview",
-            name="my-assistant",
-            instructions="You are a helpful assistant",
-            tools=file_search_tool.definitions,
-            tool_resources=file_search_tool.resources,
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-        print(f"Created run, run ID: {run.id}")
-
-        file_search_tool.remove_vector_store(vector_store.id)
-        print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
-
-        await ai_client.agents.update_agent(
-            assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources
-        )
-        print(f"Updated agent, agent ID: {agent.id}")
-
-        thread = await ai_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        message = await ai_client.agents.create_message(
-            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-        print(f"Created run, run ID: {run.id}")
-
-        await ai_client.agents.delete_file(file.id)
-        print("Deleted file")
-
-        await ai_client.agents.delete_vector_store(vector_store.id)
-        print("Deleted vector store")
-
-        await ai_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await ai_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
deleted file mode 100644
index 6381168e1d12..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_with_file_search_attachment_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations to create messages with file search attachments from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_with_file_search_attachment_async.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.client.aio import AzureAIClient
-from azure.ai.client.models import FilePurpose
-from azure.ai.client.models import FileSearchTool, MessageAttachment, ToolResources
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
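-    # Illustrative note (a sketch based on the attachment model used below, stated as an
-    # assumption rather than documented behavior): a MessageAttachment takes a list of
-    # tool definitions, so one attachment could combine several, e.g.:
-    #
-    #     tools = FileSearchTool().definitions + CodeInterpreterTool().definitions
-    #     attachment = MessageAttachment(file_id=file.id, tools=tools)
-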
- # At the moment, it should be in the format ";;;" - # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - - ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) - - # upload a file and wait for it to be processed - async with ai_client: - file = await ai_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) - - # Create agent with file search tool - agent = await ai_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message with the file search attachment - # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. - attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) - message = await ai_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] - ) - print(f"Created message, message ID: {message.id}") - - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id, sleep_interval=4 - ) - print(f"Created run, run ID: {run.id}") - - print(f"Run completed with status: {run.status}") - - await ai_client.agents.delete_file(file.id) - print("Deleted file") - - await ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = await ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py deleted file mode 100644 index 4931352e03c6..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/async_samples/user_async_functions.py +++ /dev/null @@ -1,29 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -import asyncio -import os -import sys - - -# Add parent directory to sys.path to import user_functions -current_dir = os.path.dirname(os.path.abspath(__file__)) -parent_dir = os.path.abspath(os.path.join(current_dir, "..")) -if parent_dir not in sys.path: - sys.path.insert(0, parent_dir) -from user_functions import fetch_current_datetime, fetch_weather, send_email - - -async def send_email_async(recipient: str, subject: str, body: str) -> str: - await asyncio.sleep(1) - return send_email(recipient, subject, body) - - -# Statically defined user functions for fast reference with send_email as async but the rest as sync -user_async_functions = { - "fetch_current_datetime": fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email_async, -} diff --git a/sdk/ai/azure-ai-client/samples/agents/product_info_1.md b/sdk/ai/azure-ai-client/samples/agents/product_info_1.md deleted file mode 100644 index 041155831d53..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/product_info_1.md +++ /dev/null @@ -1,51 +0,0 @@ -# Information about product item_number: 1 - -## Brand -Contoso Galaxy Innovations - -## Category -Smart Eyewear - -## Features -- Augmented Reality interface -- Voice-controlled AI assistant -- HD video recording with 3D audio -- UV protection and blue light filtering -- Wireless charging with extended battery life - -## User Guide - -### 1. Introduction -Introduction to your new SmartView Glasses - -### 2. Product Overview -Overview of features and controls - -### 3. Sizing and Fit -Finding your perfect fit and style adjustments - -### 4. Proper Care and Maintenance -Cleaning and caring for your SmartView Glasses - -### 5. Break-in Period -Adjusting to the augmented reality experience - -### 6. Safety Tips -Safety guidelines for public and private spaces - -### 7. Troubleshooting -Quick fixes for common issues - -## Warranty Information -Two-year limited warranty on all electronic components - -## Contact Information -Customer Support at support@contoso-galaxy-innovations.com - -## Return Policy -30-day return policy with no questions asked - -## FAQ -- How to sync your SmartView Glasses with your devices -- Troubleshooting connection issues -- Customizing your augmented reality environment diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py deleted file mode 100644 index 52a91f9ec043..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_basics.py +++ /dev/null @@ -1,63 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_basics.py - -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_basics.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os, time -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
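-# Note: the polling loop below exits once the run leaves the "queued", "in_progress"
-# and "requires_action" states, i.e. when it reaches a terminal status (typically
-# "completed", "failed", "cancelled" or "expired").
-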
-# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - -with ai_client: - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID: {message.id}") - - run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - - # poll the run as long as run status is queued or in progress - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id) - - print(f"Run status: {run.status}") - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py deleted file mode 100644 index af690c4ce369..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_code_interpreter_attachment.py +++ /dev/null @@ -1,80 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_code_interpreter_attachment.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with code interpreter from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_code_interpreter_attachment.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import CodeInterpreterTool -from azure.ai.client.models import FilePurpose -from azure.ai.client.models import MessageAttachment -from azure.identity import DefaultAzureCredential - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
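-# Illustrative alternative (a sketch based on the upload_file overloads earlier in this
-# patch, not code from the original sample): upload from an in-memory
-# (filename, bytes) tuple instead of a path:
-#
-#     with open("product_info_1.md", "rb") as data:
-#         file = ai_client.agents.upload_file(
-#             file=("product_info_1.md", data.read()), purpose=FilePurpose.AGENTS
-#         )
-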
-# At the moment, it should be in the format ";;;"
-# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-ai_client = AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-with ai_client:
-    # upload a file and wait for it to be processed
-    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
-    print(f"Uploaded file, file ID: {file.id}")
-
-    code_interpreter = CodeInterpreterTool()
-
-    # note that CodeInterpreter must be enabled in the agent creation; otherwise the agent will not be able to see the file attachment
-    agent = ai_client.agents.create_agent(
-        model="gpt-4-1106-preview",
-        name="my-assistant",
-        instructions="You are a helpful assistant",
-        tools=code_interpreter.definitions,
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    thread = ai_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
-
-    # create a message with the attachment
-    attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
-    message = ai_client.agents.create_message(
-        thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
-    )
-    print(f"Created message, message ID: {message.id}")
-
-    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Run finished with status: {run.status}")
-
-    if run.status == "failed":
-        # If you got "Rate limit is exceeded.", you need to request more quota
-        print(f"Run failed: {run.last_error}")
-
-    ai_client.agents.delete_file(file.id)
-    print("Deleted file")
-
-    ai_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = ai_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py
deleted file mode 100644
index df47c9413b7f..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_file_search.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_file_search.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with file searching from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_file_search.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.client import AzureAIClient
-from azure.ai.client.models import FileSearchTool
-from azure.identity import DefaultAzureCredential
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format ";;;"
-# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
-ai_client = AzureAIClient.from_connection_string(
    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-with ai_client:
-
-    openai_file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
-    print(f"Uploaded file, file ID: {openai_file.id}")
-
-    openai_vectorstore = ai_client.agents.create_vector_store_and_poll(
-        file_ids=[openai_file.id], name="my_vectorstore"
-    )
-    print(f"Created vector store, vector store ID: {openai_vectorstore.id}")
-
-    # Create file search tool with resources
-    file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id])
-
-    # Create agent with file search tool and process assistant run
-    agent = ai_client.agents.create_agent(
-        model="gpt-4-1106-preview",
-        name="my-assistant",
-        instructions="Hello, you are a helpful assistant and can search information from uploaded files",
-        tools=file_search.definitions,
-        tool_resources=file_search.resources,
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    # Create thread for communication
-    thread = ai_client.agents.create_thread()
-    print(f"Created thread, ID: {thread.id}")
-
-    # Create message to thread
-    message = ai_client.agents.create_message(
-        thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?"
-    )
-    print(f"Created message, ID: {message.id}")
-
-    # Create and process assistant run in thread with tools
-    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Run finished with status: {run.status}")
-
-    if run.status == "failed":
-        # If you got "Rate limit is exceeded.", you need to request more quota
-        print(f"Run failed: {run.last_error}")
-
-    # Delete the vector store when done
-    ai_client.agents.delete_vector_store(openai_vectorstore.id)
-    print("Deleted vector store")
-
-    # Delete the agent when done
-    ai_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    # Fetch and log all messages
-    messages = ai_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py
deleted file mode 100644
index c403732b7d8f..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_functions.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_functions.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with custom functions from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_functions.py
-
-    Before running the sample:
-
-    pip install azure-ai-client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os, time
-from azure.ai.client import AzureAIClient
-from azure.identity import DefaultAzureCredential
-from azure.ai.client.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall
-from user_functions import user_functions
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
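-# Illustrative note (shape taken from the code below, not an official schema): each
-# entry passed to submit_tool_outputs_to_run is a small dict, e.g.
-#
-#     {"tool_call_id": "<id of the RequiredFunctionToolCall>", "output": "<stringified result>"}
-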
-# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - -# Initialize function tool with user functions -functions = FunctionTool(functions=user_functions) - -with ai_client: - # Create an agent and run user's request with function calls - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are a helpful assistant", - tools=functions.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, ID: {thread.id}") - - message = ai_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - run = ai_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, ID: {run.id}") - - while run.status in ["queued", "in_progress", "requires_action"]: - time.sleep(1) - run = ai_client.agents.get_run(thread_id=thread.id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls provided - cancelling run") - ai_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) - break - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = functions.execute(tool_call) - tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - ai_client.agents.submit_tool_outputs_to_run( - thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs - ) - - print(f"Current run status: {run.status}") - - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py deleted file mode 100644 index 11897b9f1f85..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_run_with_toolset.py +++ /dev/null @@ -1,80 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_run_with_toolset.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_run_with_toolset.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
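The function-calling loop above hands results back through `submit_tool_outputs_to_run`; each entry pairs the ID of the tool call it answers with the function's string output. A sketch of the payload shape, with a hypothetical call ID:

```python
# Each output echoes the tool_call.id it answers; the output itself is the
# JSON string returned by the user function (see user_functions.py below).
tool_outputs = [
    {
        "tool_call_id": "call_abc123",  # hypothetical ID from a RequiredFunctionToolCall
        "output": '{"weather": "Sunny, 25C"}',
    }
]
```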
-""" - -import os -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import FunctionTool, ToolSet, CodeInterpreterTool -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - -# Initialize agent toolset with user functions and code interpreter -functions = FunctionTool(user_functions) -code_interpreter = CodeInterpreterTool() - -toolset = ToolSet() -toolset.add(functions) -toolset.add(code_interpreter) - -# Create agent with toolset and process assistant run -with ai_client: - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset - ) - print(f"Created agent, ID: {agent.id}") - - # Create thread for communication - thread = ai_client.agents.create_thread() - print(f"Created thread, ID: {thread.id}") - - # Create message to thread - message = ai_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - # Create and process agent run in thread with tools - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Run finished with status: {run.status}") - - if run.status == "failed": - print(f"Run failed: {run.last_error}") - - # Delete the assistant when done - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py deleted file mode 100644 index a20b6176a616..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler.py +++ /dev/null @@ -1,98 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_eventhandler.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler in streaming from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential - -from azure.ai.client.models import ( - AgentEventHandler, - MessageDeltaTextContent, - MessageDeltaChunk, - ThreadMessage, - ThreadRun, - RunStep, -) - -from typing import Any - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
-# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - - -class MyEventHandler(AgentEventHandler): - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with ai_client: - # Create an agent and run stream with event handler - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" - ) - print(f"Created agent, agent ID {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with ai_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py deleted file mode 100644 index 5dea7d60b6be..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ /dev/null @@ -1,132 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_eventhandler_with_functions.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_functions.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
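When streaming with plain function tools, the event handler itself must feed tool results back; the sample below does this from `on_thread_run` via `submit_tool_outputs_to_stream`, reusing itself as the handler so the original stream keeps flowing. The core step, sketched out of context (here `self` is the `AgentEventHandler` subclass and `tool_outputs` has already been built):

```python
with ai_client.agents.submit_tool_outputs_to_stream(
    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
) as tool_stream:
    tool_stream.until_done()
```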
-""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.client.models import AgentEventHandler -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction - -from typing import Any - -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - - -class MyEventHandler(AgentEventHandler): - - def __init__(self, functions: FunctionTool) -> None: - self.functions = functions - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = functions.execute(tool_call) - tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - with ai_client.agents.submit_tool_outputs_to_stream( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self - ) as stream: - stream.until_done() - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with ai_client: - functions = FunctionTool(user_functions) - - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are a helpful assistant", - tools=functions.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = ai_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? 
Also let me know the details.", - ) - print(f"Created message, message ID {message.id}") - - with ai_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler(functions) - ) as stream: - stream.until_done() - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py deleted file mode 100644 index b02eac59906d..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ /dev/null @@ -1,109 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_eventhandler_with_toolset.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_toolset.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.client.models import AgentEventHandler -from azure.ai.client.operations import AgentsOperations -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import FunctionTool, ToolSet - - -import os -from typing import Any - -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - - -# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream -# method and functions gets automatically called by default. -class MyEventHandler(AgentEventHandler): - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - if run.status == "failed": - print(f"Run failed. Error: {run.last_error}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. 
Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with ai_client: - functions = FunctionTool(user_functions) - toolset = ToolSet() - toolset.add(functions) - - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset - ) - print(f"Created agent, ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = ai_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", - ) - print(f"Created message, message ID {message.id}") - - with ai_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py deleted file mode 100644 index 3c223ac162b9..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration.py +++ /dev/null @@ -1,92 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_iteration.py - -DESCRIPTION: - This sample demonstrates how to use agent operations in streaming from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_iteration.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import ( - AgentStreamEvent, - MessageDeltaTextContent, - MessageDeltaChunk, - ThreadMessage, - ThreadRun, - RunStep, -) - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. 
-# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - -with ai_client: - # Create an agent and run stream with iteration - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" - ) - print(f"Created agent, ID {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: - - for event_type, event_data in stream: - - if isinstance(event_data, MessageDeltaChunk): - for content_part in event_data.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py deleted file mode 100644 index f28c20ffab50..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ /dev/null @@ -1,122 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_iteration_with_toolset.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset and iteration in streaming from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_iteration_with_toolset.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
-""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import AgentStreamEvent -from azure.ai.client.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.client.models import FunctionTool, ToolSet -from azure.ai.client.operations import AgentsOperations -from azure.identity import DefaultAzureCredential -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - - -# Function to handle tool stream iteration -def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): - try: - with operations.submit_tool_outputs_to_stream( - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - ) as tool_stream: - for tool_event_type, tool_event_data in tool_stream: - if tool_event_type == AgentStreamEvent.ERROR: - print(f"An error occurred in tool stream. Data: {tool_event_data}") - elif tool_event_type == AgentStreamEvent.DONE: - print("Tool stream completed.") - break - else: - if isinstance(tool_event_data, MessageDeltaChunk): - handle_message_delta(tool_event_data) - - except Exception as e: - print(f"Failed to process tool stream: {e}") - - -# Function to handle message delta chunks -def handle_message_delta(delta: MessageDeltaChunk) -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - -functions = FunctionTool(user_functions) -toolset = ToolSet() -toolset.add(functions) - -with ai_client: - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") - print(f"Created message, message ID {message.id}") - - with ai_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: - - for event_type, event_data in stream: - - if isinstance(event_data, MessageDeltaChunk): - handle_message_delta(event_data) - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - if event_data.status == "failed": - print(f"Run failed. Error: {event_data.last_error}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. 
Data: {event_data}")
-
-            elif event_type == AgentStreamEvent.DONE:
-                print("Stream completed.")
-                break
-
-            else:
-                print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-    ai_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = ai_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
deleted file mode 100644
index 362fa5a53449..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_vector_store_batch_file_search.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_vector_store_batch_file_search.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
-    the Azure Agents service using a synchronous client. It also shows how to remove a vector store from the file search tool
-    and update the agent after that.
-
-USAGE:
-    python sample_agents_vector_store_batch_file_search.py
-
-    Before running the sample:
-
-    pip install azure.ai.client azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.client import AzureAIClient
-from azure.ai.client.models import FileSearchTool, FilePurpose
-from azure.identity import DefaultAzureCredential
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
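The batch sample below creates an empty vector store and then adds files as a polled batch; supplying the IDs at creation time, as the file-search sample earlier does, is equivalent. A sketch of the two routes, assuming an uploaded `file` as in the samples:

```python
# Option 1: files supplied at creation time.
vector_store = ai_client.agents.create_vector_store_and_poll(
    file_ids=[file.id], name="sample_vector_store"
)

# Option 2: create empty, then add files as a polled batch.
vector_store = ai_client.agents.create_vector_store_and_poll(
    file_ids=[], name="sample_vector_store"
)
batch = ai_client.agents.create_vector_store_file_batch_and_poll(
    vector_store_id=vector_store.id, file_ids=[file.id]
)
```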
-# At the moment, it should be in the format "<Endpoint>;<Subscription>;<ResourceGroup>;<Workspace>"
-# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
-
-ai_client = AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-with ai_client:
-
-    # Upload a file and wait for it to be processed
-    file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
-    print(f"Uploaded file, file ID: {file.id}")
-
-    # Create a vector store with no files and wait for it to be processed
-    vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
-    print(f"Created vector store, vector store ID: {vector_store.id}")
-
-    # Add the file to the vector store; alternatively, supply the file IDs at vector store creation
-    vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(
-        vector_store_id=vector_store.id, file_ids=[file.id]
-    )
-    print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
-
-    # Create a file search tool
-    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
-
-    # Note that the FileSearchTool must be added via both tools and tool_resources, or the agent will be unable to search the file
-    agent = ai_client.agents.create_agent(
-        model="gpt-4-1106-preview",
-        name="my-assistant",
-        instructions="You are a helpful assistant",
-        tools=file_search_tool.definitions,
-        tool_resources=file_search_tool.resources,
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    thread = ai_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
-
-    message = ai_client.agents.create_message(
-        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
-    )
-    print(f"Created message, message ID: {message.id}")
-
-    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Created run, run ID: {run.id}")
-
-    file_search_tool.remove_vector_store(vector_store.id)
-    print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
-
-    ai_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources)
-    print(f"Updated agent, agent ID: {agent.id}")
-
-    thread = ai_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
-
-    message = ai_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?")
-    print(f"Created message, message ID: {message.id}")
-
-    run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Created run, run ID: {run.id}")
-
-    ai_client.agents.delete_file(file.id)
-    print("Deleted file")
-
-    ai_client.agents.delete_vector_store(vector_store.id)
-    print("Deleted vector store")
-
-    ai_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = ai_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py
deleted file mode 100644
index 5e3a6f2f4d39..000000000000
--- a/sdk/ai/azure-ai-client/samples/agents/sample_agents_with_file_search_attachment.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
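The attachment sample that follows never creates a vector store explicitly: attaching a file together with `FileSearchTool` definitions makes the service build a temporary one, with a default expiration policy of seven days. The attachment itself is a one-liner, assuming an uploaded `file` as above:

```python
# File search runs against an implicit, temporary vector store (seven-day default expiry).
attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
```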
-# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_with_file_search_attachment.py - -DESCRIPTION: - This sample demonstrates how to use agent operations to create messages with file search attachments from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_with_file_search_attachment.py - - Before running the sample: - - pip install azure.ai.client azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.client import AzureAIClient -from azure.ai.client.models import FilePurpose -from azure.ai.client.models import MessageAttachment -from azure.ai.client.models import FileSearchTool -from azure.identity import DefaultAzureCredential - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - -with ai_client: - - # upload a file and wait for it to be processed - file = ai_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create agent with file search tool - agent = ai_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = ai_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message with the file search attachment - # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. - attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) - message = ai_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] - ) - print(f"Created message, message ID: {message.id}") - - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") - - ai_client.agents.delete_file(file.id) - print("Deleted file") - - ai_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = ai_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-client/samples/agents/user_functions.py b/sdk/ai/azure-ai-client/samples/agents/user_functions.py deleted file mode 100644 index c1f9ef275751..000000000000 --- a/sdk/ai/azure-ai-client/samples/agents/user_functions.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -import json -import datetime -from typing import Any, Callable, Set - -# These are the user-defined functions that can be called by the agent. - - -def fetch_current_datetime() -> str: - """ - Get the current time as a JSON string. - - :return: The current time in JSON format. 
- :rtype: str - """ - current_time = datetime.datetime.now() - time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")}) - return time_json - - -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. - mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -def send_email(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Email address of the recipient. - :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - # In a real-world scenario, you'd use an SMTP server or an email service API. - # Here, we'll mock the email sending. - print(f"Sending email to {recipient}...") - print(f"Subject: {subject}") - print(f"Body:\n{body}") - - message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) - return message_json - - -# Statically defined user functions for fast reference -user_functions: Set[Callable[..., Any]] = { - fetch_current_datetime, - fetch_weather, - send_email, -} \ No newline at end of file diff --git a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py deleted file mode 100644 index 23644a3cdb14..000000000000 --- a/sdk/ai/azure-ai-client/samples/connections/async_samples/sample_connections_async.py +++ /dev/null @@ -1,139 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_connections_async.py - -DESCRIPTION: - Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate connections - and get connections properties. - -USAGE: - python sample_connections_async.py - - Before running the sample: - - pip install azure.ai.client aiohttp azure-identity - - Set the environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
-""" - -import asyncio -import os -from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import ConnectionType, AuthenticationType -from azure.identity import DefaultAzureCredential - - -async def sample_connections_async(): - - ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) - - async with ai_client: - - # List the properties of all connections - connections = await ai_client.connections.list() - print(f"====> Listing of all connections (found {len(connections)}):") - for connection in connections: - print(connection) - - # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections) - connections = await ai_client.connections.list( - connection_type=ConnectionType.AZURE_OPEN_AI, - ) - print("====> Listing of all Azure Open AI connections (found {len(connections)}):") - for connection in connections: - print(connection) - - # Get the properties of the default connection of a particular "type", with credentials - connection = await ai_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, - with_credentials=True, # Optional. Defaults to "False" - ) - print("====> Get default Azure Open AI connection:") - print(connection) - - # Get the properties of a connection by connection name: - connection = await ai_client.connections.get( - connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], - with_credentials=True, # Optional. Defaults to "False" - ) - print("====> Get connection by name:") - print(connection) - - # Examples of how you would create Inference client - if connection.connection_type == ConnectionType.AZURE_OPEN_AI: - - from openai import AsyncAzureOpenAI - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AsyncAzureOpenAI( - api_key=connection.key, - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif connection.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - from azure.identity import get_bearer_token_provider - - client = AsyncAzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = await client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - print(response.choices[0].message.content) - - elif connection.connection_type == ConnectionType.SERVERLESS: - - from azure.ai.inference.aio import ChatCompletionsClient - from azure.ai.inference.models import UserMessage - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - from azure.core.credentials import 
AzureKeyCredential
-
-                client = ChatCompletionsClient(
-                    endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
-                )
-            elif connection.authentication_type == AuthenticationType.AAD:
-                # MaaS models do not yet support Entra ID auth
-                print("====> Creating ChatCompletionsClient using Entra ID authentication")
-                client = ChatCompletionsClient(
-                    endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-                )
-            else:
-                raise ValueError(f"Authentication type {connection.authentication_type} not supported.")
-
-            response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
-            await client.close()
-            print(response.choices[0].message.content)
-
-
-async def main():
-    await sample_connections_async()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py b/sdk/ai/azure-ai-client/samples/connections/sample_connections.py
deleted file mode 100644
index a5ce848c110b..000000000000
--- a/sdk/ai/azure-ai-client/samples/connections/sample_connections.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_connections.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to enumerate connections
-    and get connection properties.
-
-USAGE:
-    python sample_connections.py
-
-    Before running the sample:
-
-    pip install azure.ai.client azure-identity
-
-    Set the environment variables with your own values:
-    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.client import AzureAIClient
-from azure.ai.client.models import ConnectionType, AuthenticationType
-from openai import AzureOpenAI
-from azure.ai.inference import ChatCompletionsClient
-from azure.ai.inference.models import UserMessage
-from azure.identity import DefaultAzureCredential, get_bearer_token_provider
-from azure.core.credentials import AzureKeyCredential
-
-ai_client = AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-with ai_client:
-    # List the properties of all connections
-    connections = ai_client.connections.list()
-    print(f"====> Listing of all connections (found {len(connections)}):")
-    for connection in connections:
-        print(connection)
-
-    # List the properties of all connections of a particular "type" (in this sample, Azure OpenAI connections)
-    connections = ai_client.connections.list(
-        connection_type=ConnectionType.AZURE_OPEN_AI,
-    )
-    print(f"====> Listing of all Azure Open AI connections (found {len(connections)}):")
-    for connection in connections:
-        print(connection)
-
-    # Get the properties of the default connection of a particular "type", with credentials
-    connection = ai_client.connections.get_default(
-        connection_type=ConnectionType.AZURE_OPEN_AI,
-        with_credentials=True,  # Optional. Defaults to "False"
-    )
-    print("====> Get default Azure Open AI connection:")
-    print(connection)
-
-    # Get the properties of a connection by connection name:
-    connection = ai_client.connections.get(
-        connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], with_credentials=True  # Optional.
Defaults to "False" - ) - print("====> Get connection by name:") - print(connection) - - -# Examples of how you would create Inference client -if connection.connection_type == ConnectionType.AZURE_OPEN_AI: - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AzureOpenAI( - api_key=connection.key, - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif connection.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - client.close() - print(response.choices[0].message.content) - -elif connection.connection_type == ConnectionType.SERVERLESS: - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - client.close() - print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-client/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-client/samples/evaluations/evaluate_test_data.jsonl deleted file mode 100644 index 0396a22c2db6..000000000000 --- a/sdk/ai/azure-ai-client/samples/evaluations/evaluate_test_data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. 
The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."} -{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} -{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."} diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py deleted file mode 100644 index 63230a4ce8e8..000000000000 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations.py +++ /dev/null @@ -1,88 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_basics.py - -DESCRIPTION: - This sample demonstrates how to use basic agent operations from - the Azure Agents service using a synchronous client. 
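Each line of `evaluate_test_data.jsonl` above is one record carrying the four fields the evaluators consume. A representative row, reformatted for readability:

```python
row = {
    "query": "What is the capital of France?",
    "context": "France is in Europe",
    "response": "Paris is the capital of France.",
    "ground_truth": "Paris is the capital of France.",
}
```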
- -USAGE: - python sample_evaluations.py - - Before running the sample: - - pip install azure-identity - pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/ai_project_utils#egg=azure-ai-client&subdirectory=sdk/ai/azure-ai-client" - pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/demo_evaluators_id#egg=azure-ai-evaluation&subdirectory=sdk/evaluation/azure-ai-evaluation" - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os, time -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType -from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, HateUnfairnessEvaluator - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -ai_client = AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - -# Upload data for evaluation -# Service side fix needed to make this work -# data_id = ai_client.upload_file("./evaluate_test_data.jsonl") -data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3" - -default_connection = ai_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) - - - -# Create an evaluation -evaluation = Evaluation( - display_name="Remote Evaluation", - description="Evaluation of dataset", - data=Dataset(id=data_id), - evaluators={ - "f1_score": EvaluatorConfiguration( - id=F1ScoreEvaluator.evaluator_id, - ), - "relevance": EvaluatorConfiguration( - id=RelevanceEvaluator.evaluator_id, - init_params={ - "model_config": default_connection.to_evaluator_model_config(deployment_name="GPT-4-Prod", api_version="2024-08-01-preview") - }, - ), - "hate_unfairness": EvaluatorConfiguration( - id=HateUnfairnessEvaluator.evaluator_id, - init_params={ - "azure_ai_project": ai_client.scope - }, - ), - }, - # This is needed as a workaround until environment gets published to registry - properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"}, -) - -# Create evaluation -evaluation_response = ai_client.evaluations.create( - evaluation=evaluation, -) - -# Get evaluation -get_evaluation_response = ai_client.evaluations.get(evaluation_response.id) - -print("----------------------------------------------------------------") -print("Created evaluation, evaluation ID: ", get_evaluation_response.id) -print("Evaluation status: ", get_evaluation_response.status) -print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) -print("----------------------------------------------------------------") diff --git a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py deleted file mode 100644 index 286f5ac53e9b..000000000000 --- a/sdk/ai/azure-ai-client/samples/evaluations/sample_evaluations_schedules.py +++ /dev/null @@ -1,69 +0,0 @@ -from azure.ai.client import AzureAIClient -from azure.identity import DefaultAzureCredential -from azure.ai.client.models import ( - 
AppInsightsConfiguration,
-    EvaluatorConfiguration,
-    SamplingStrategy,
-    EvaluationSchedule,
-    CronTrigger,
-)
-
-
-def main():
-    app_insights_config = AppInsightsConfiguration(
-        resource_id="sample_id", query="your_connection_string", service_name="sample_service_name"
-    )
-
-    f1_evaluator_config = EvaluatorConfiguration(
-        id="azureml://registries/jamahaja-evals-registry/models/F1ScoreEvaluator/versions/1"
-    )
-
-    custom_relevance_evaluator_config = EvaluatorConfiguration(
-        id="azureml://registries/jamahaja-evals-registry/models/Relevance-Evaluator-AI-Evaluation/versions/2",
-        init_params={"param3": "value3", "param4": "value4"},
-        data_mapping={"data3": "value3", "data4": "value4"},
-    )
-
-    cron_expression = "0 0 0 1/1 * ? *"
-    cron_trigger = CronTrigger(expression=cron_expression)
-    evaluators = {"f1_score": f1_evaluator_config, "relevance": custom_relevance_evaluator_config}
-
-    sampling_strategy = SamplingStrategy(rate=0.2)
-    display_name = "Sample Online Evaluation Schedule"
-    description = "Sample Online Evaluation Schedule Description"
-    tags = {"tag1": "value1", "tag2": "value2"}
-    properties = {"property1": "value1", "property2": "value2"}
-
-    evaluation_schedule = EvaluationSchedule(
-        data=app_insights_config,
-        evaluators=evaluators,
-        trigger=cron_trigger,
-        sampling_strategy=sampling_strategy,
-        display_name=display_name,
-        description=description,
-        tags=tags,
-        properties=properties,
-    )
-
-    # Project Configuration
-    Subscription = ""
-    ResourceGroup = ""
-    Workspace = ""
-    Endpoint = ""
-    client = AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}",
-        logging_enable=True,
-    )
-    evaluation_schedule = client.evaluations.create_or_replace_schedule(
-        id="sample_schedule_id", resource=evaluation_schedule
-    )
-    client.evaluations.get_schedule(evaluation_schedule.id)
-    client.evaluations.list_schedule()
-    client.evaluations.list()
-    client.evaluations.delete_schedule(evaluation_schedule.id)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py
deleted file mode 100644
index 08f57cd10c79..000000000000
--- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_azure_openai_client_async.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_azure_openai_client_async.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to get an authenticated
-    AsyncAzureOpenAI client from the openai package.
-
-USAGE:
-    python sample_get_azure_openai_client_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.client aiohttp openai
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
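An `EvaluatorConfiguration`, as used in both evaluation samples above, names a registered evaluator by ID and optionally carries constructor parameters and column mappings. A sketch of the three fields; the registry, parameters, and mapping here are placeholders:

```python
cfg = EvaluatorConfiguration(
    id="azureml://registries/<registry>/models/F1ScoreEvaluator/versions/1",  # placeholder registry
    init_params={"param": "value"},       # forwarded to the evaluator's constructor
    data_mapping={"response": "answer"},  # renames dataset columns for the evaluator
)
```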
-""" -import os -import asyncio -from azure.ai.client.aio import AzureAIClient -from azure.identity import DefaultAzureCredential - - -async def sample_get_azure_openai_client_async(): - - async with AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as ai_client: - - # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection: - async with await ai_client.inference.get_azure_openai_client() as client: - - response = await client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - - print(response.choices[0].message.content) - - -async def main(): - await sample_get_azure_openai_client_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py deleted file mode 100644 index efee31557786..000000000000 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_chat_completions_client_async.py +++ /dev/null @@ -1,49 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_get_chat_completions_client_async.py - -DESCRIPTION: - Given an AzureAIClient, this sample demonstrates how to get an authenticated - async ChatCompletionsClient from the azure.ai.inference package. - -USAGE: - python sample_get_chat_completions_client_async.py - - Before running the sample: - - pip install azure.ai.client aiohttp azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" -import os -import asyncio -from azure.ai.client.aio import AzureAIClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential - - -async def sample_get_chat_completions_client_async(): - - async with AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as ai_client: - - # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: - async with await ai_client.inference.get_chat_completions_client() as client: - - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - print(response.choices[0].message.content) - - -async def main(): - await sample_get_chat_completions_client_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py deleted file mode 100644 index 2de28d1b512d..000000000000 --- a/sdk/ai/azure-ai-client/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ /dev/null @@ -1,54 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------
-
-"""
-FILE: sample_get_embeddings_client_async.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to get an authenticated
-    async EmbeddingsClient from the azure.ai.inference package.
-
-USAGE:
-    python sample_get_embeddings_client_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.client aiohttp azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-import os
-from azure.ai.client.aio import AzureAIClient
-from azure.identity import DefaultAzureCredential
-
-
-async def sample_get_embeddings_client_async():
-
-    async with AzureAIClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-    ) as ai_client:
-
-        # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection:
-        async with await ai_client.inference.get_embeddings_client() as client:
-
-            response = await client.embed(input=["first phrase", "second phrase", "third phrase"])
-
-            for item in response.data:
-                length = len(item.embedding)
-                print(
-                    f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
-                    f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
-                )
-
-
-async def main():
-    await sample_get_embeddings_client_async()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
deleted file mode 100644
index 01f193507201..000000000000
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_azure_openai_client.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_azure_openai_client.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to get an authenticated
-    AzureOpenAI client from the openai package.
-
-USAGE:
-    python sample_get_azure_openai_client.py
-
-    Before running the sample:
-
-    pip install azure.ai.client openai
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-from azure.ai.client import AzureAIClient
-from azure.identity import DefaultAzureCredential
-
-with AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-) as ai_client:
-
-    # Get an authenticated OpenAI client for your default Azure OpenAI connection:
-    with ai_client.inference.get_azure_openai_client() as client:
-
-        response = client.chat.completions.create(
-            model="gpt-4-0613",
-            messages=[
-                {
-                    "role": "user",
-                    "content": "How many feet are in a mile?",
-                },
-            ],
-        )
-
-        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
deleted file mode 100644
index 3835e1b6dc88..000000000000
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_chat_completions_client.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_chat_completions_client.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to get an authenticated
-    ChatCompletionsClient from the azure.ai.inference package.
-
-USAGE:
-    python sample_get_chat_completions_client.py
-
-    Before running the sample:
-
-    pip install azure.ai.client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-from azure.ai.client import AzureAIClient
-from azure.ai.inference.models import UserMessage
-from azure.identity import DefaultAzureCredential
-
-with AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-) as ai_client:
-
-    # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection:
-    with ai_client.inference.get_chat_completions_client() as client:
-
-        response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
-
-        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
deleted file mode 100644
index 57828b7eaffd..000000000000
--- a/sdk/ai/azure-ai-client/samples/inference/sample_get_embeddings_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_embeddings_client.py
-
-DESCRIPTION:
-    Given an AzureAIClient, this sample demonstrates how to get an authenticated
-    EmbeddingsClient from the azure.ai.inference package.
-
-USAGE:
-    python sample_get_embeddings_client.py
-
-    Before running the sample:
-
-    pip install azure.ai.client azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-from azure.ai.client import AzureAIClient
-from azure.identity import DefaultAzureCredential
-
-with AzureAIClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-) as ai_client:
-
-    # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection:
-    with ai_client.inference.get_embeddings_client() as client:
-
-        response = client.embed(input=["first phrase", "second phrase", "third phrase"])
-
-        for item in response.data:
-            length = len(item.embedding)
-            print(
-                f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
-                f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
-            )
diff --git a/sdk/ai/azure-ai-client/setup.py b/sdk/ai/azure-ai-client/setup.py
deleted file mode 100644
index ca2aa3c55e1f..000000000000
--- a/sdk/ai/azure-ai-client/setup.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-import os
-import re
-from setuptools import setup, find_packages
-
-
-PACKAGE_NAME = "azure-ai-client"
-PACKAGE_PPRINT_NAME = "Azure AI Client"
-
-# a-b-c => a/b/c
-package_folder_path = PACKAGE_NAME.replace("-", "/")
-
-# Version extraction inspired from 'requests'
-with open(os.path.join(package_folder_path, "_version.py"), "r") as fd:
-    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
-
-if not version:
-    raise RuntimeError("Cannot find version information")
-
-
-setup(
-    name=PACKAGE_NAME,
-    version=version,
-    description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME),
-    long_description=open("README.md", "r").read(),
-    long_description_content_type="text/markdown",
-    license="MIT License",
-    author="Microsoft Corporation",
-    author_email="azpysdkhelp@microsoft.com",
-    url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk",
-    keywords="azure, azure sdk",
-    classifiers=[
-        "Development Status :: 4 - Beta",
-        "Programming Language :: Python",
-        "Programming Language :: Python :: 3 :: Only",
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: 3.9",
-        "Programming Language :: Python :: 3.10",
-        "Programming Language :: Python :: 3.11",
-        "Programming Language :: Python :: 3.12",
-        "License :: OSI Approved :: MIT License",
-    ],
-    zip_safe=False,
-    packages=find_packages(
-        exclude=[
-            "tests",
-            # Exclude packages that will be covered by PEP420 or nspkg
-            "azure",
-            "azure.ai",
-        ]
-    ),
-    include_package_data=True,
-    package_data={
-        "azure.ai.client": ["py.typed"],
-    },
-    install_requires=[
-        "isodate>=0.6.1",
-        "azure-core>=1.30.0",
-        "typing-extensions>=4.6.0",
-    ],
-    python_requires=">=3.8",
-)
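-# For reference, the version-extraction regex above expects azure/ai/client/_version.py
-# to contain a single line of the form (the exact value is illustrative):
-#
-#     VERSION = "1.0.0b1"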
diff --git a/sdk/ai/azure-ai-client/tests/README.md b/sdk/ai/azure-ai-client/tests/README.md
deleted file mode 100644
index 76d76aee9c52..000000000000
--- a/sdk/ai/azure-ai-client/tests/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# copied from azure-ai-inference TODO update
-
-# Azure AI client library tests for Python
-
-The instructions below are for running tests locally, on a Windows machine, against the live service.
-
-## Prerequisites
-
-The live tests were written against the AI models mentioned below. You will need to deploy a `gpt-4o` model in the Azure OpenAI Studio, and have the endpoint and key for it:
-
-- `gpt-4o` on Azure OpenAI (AOAI), for Agents tests
-
-## Setup
-
-- Clone or download this sample repository.
-- Open a command prompt window in the folder `sdk\ai\azure-ai-client`.
-- If you want to run tests against the latest published client library, install it by running:
-   ```bash
-   pip install azure-ai-client
-   ```
-- If you want to run tests against a locally built client library:
-    - First build the wheel:
-        ```bash
-        pip install wheel
-        pip install -r dev_requirements.txt
-        python setup.py bdist_wheel
-        ```
-    - Then install the resulting local wheel (update version `1.0.0b1` to the current one):
-        ```bash
-        pip install dist\azure_ai_client-1.0.0b1-py3-none-any.whl --user --force-reinstall
-        ```
-
-## Set environment variables
-
-Here is the list of environment variables used by the tests:
-
-```bash
-# For agents, including tools
-set AZURE_AI_CLIENT_AGENTS_CONNECTION_STRING=
-```
-
-## Configure test proxy
-
-Configure the test proxy to run live service tests without recordings:
-
-```bash
-set AZURE_TEST_RUN_LIVE=true
-set AZURE_SKIP_LIVE_RECORDING=true
-set PROXY_URL=http://localhost:5000
-set AZURE_TEST_USE_CLI_AUTH=true
-```
-
-## Run tests
-
-To run all tests, type:
-
-```bash
-pytest
-```
-
-On Windows, to run just the agents tests, run:
-
-```bash
-python -m pytest tests\agents
-```
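-
-To run a single test by name, standard pytest selection also works (the test name below is just an example):
-
-```bash
-python -m pytest tests\agents -k test_create_delete_agent
-```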
-
-## Additional information
-
-See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py
deleted file mode 100644
index 38ff00619039..000000000000
--- a/sdk/ai/azure-ai-client/tests/agents/test_agents_client.py
+++ /dev/null
@@ -1,1119 +0,0 @@
-# pylint: disable=too-many-lines
-# # ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import os
-import json
-import time
-import functools
-import datetime
-import logging
-import sys
-
-from azure.ai.client import AzureAIClient
-from azure.ai.client.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
-from azure.core.pipeline.transport import RequestsTransport
-from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
-from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
-from azure.identity import DefaultAzureCredential
-
-# TODO clean this up / get rid of anything not in use
-
-"""
-issues I've noticed with the code:
-    delete_thread(thread.id) fails
-    cancel_thread(thread.id) expires/times out occasionally
-    added time.sleep() to the beginning of my last few tests to avoid rate limits
-    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
-"""
-
-# Set to True to enable SDK logging
-LOGGING_ENABLED = True
-
-if LOGGING_ENABLED:
-    # Create a logger for the 'azure' SDK
-    # See https://docs.python.org/3/library/logging.html
-    logger = logging.getLogger("azure")
-    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
-
-    # Configure a console output
-    handler = logging.StreamHandler(stream=sys.stdout)
-    logger.addHandler(handler)
-
-
-agentClientPreparer = functools.partial(
-    EnvironmentVariableLoader,
-    "azure_ai_client",
-    project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
-)
-"""
-agentClientPreparer = functools.partial(
-    EnvironmentVariableLoader,
-    'azure_ai_client',
-    azure_ai_client_host_name="https://foo.bar.some-domain.ms",
-    azure_ai_client_subscription_id="00000000-0000-0000-0000-000000000000",
-    azure_ai_client_resource_group_name="rg-resour-cegr-oupfoo1",
-    azure_ai_client_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
-)
-"""
-
-
-# create tool for agent use
-def fetch_current_datetime_live():
-    """
-    Get the current time as a JSON string.
-
-    :return: The real current time, for live test runs.
-    :rtype: str
-    """
-    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    time_json = json.dumps({"current_time": current_datetime})
-    return time_json
-
-
-# create tool for agent use
-def fetch_current_datetime_recordings():
-    """
-    Get the current time as a JSON string.
-
-    :return: Static time string so that test recordings work.
-    :rtype: str
-    """
-    time_json = json.dumps({"current_time": "2024-10-10 12:30:19"})
-    return time_json
-
-
-# Statically defined user functions for fast reference
-user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings}
-user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live}
-
-
-# The test class name needs to start with "Test" to get collected by pytest
-class TestagentClient(AzureRecordedTestCase):
-
-    # helper function: create client using environment variables
-    def create_client(self, **kwargs):
-        # fetch environment variables
-        connection_string = kwargs.pop("project_connection_string_agents_tests")
-        credential = self.get_credential(AzureAIClient, is_async=False)
-
-        # create and return client
-        client = AzureAIClient.from_connection_string(
-            credential=credential,
-            connection=connection_string,
-        )
-
-        return client
-
-    # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list
-    """
-    # NOTE: this test should not be run against a shared resource, as it will delete all agents
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_clear_client(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # clear agent list
-        agents = client.agents.list_agents().data
-        for agent in agents:
-            client.agents.delete_agent(agent.id)
-        assert client.agents.list_agents().data.__len__() == 0
-
-        # close client
-        client.close()
-    """
-
-    # # **********************************************************************************
-    # #
-    # #                               UNIT TESTS
-    # #
-    # # **********************************************************************************
-
-    # # **********************************************************************************
-    # #
-    # #            HAPPY PATH SERVICE TESTS - agent APIs
-    # #
-    # # **********************************************************************************
-
-    # test client creation
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_client(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # close client
-        client.close()
-
-    # test agent creation and deletion
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_delete_agent(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-        print("Created client")
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test agent creation with tools
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_agent_with_tools(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # initialize agent functions
-        functions = FunctionTool(functions=user_functions_recording)
-
-        # create agent with tools
-        agent = client.agents.create_agent(
-            model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions
-        )
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-        assert agent.tools
-        assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
-        print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_update_agent(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-
-        # update agent and confirm changes went through
-        agent.update(name="my-agent2", instructions="You are helpful agent")
-        assert agent.name
-        assert agent.name == "my-agent2"
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    """
-    DISABLED: can't perform consistently on shared resource
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_agent_list(self, **kwargs):
-        # create client and ensure there are no previous agents
-        client = self.create_client(**kwargs)
-        list_length = client.agents.list_agents().data.__len__()
-
-        # create agent and check that it appears in the list
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert client.agents.list_agents().data.__len__() == list_length + 1
-        assert client.agents.list_agents().data[0].id == agent.id
-
-        # create second agent and check that it appears in the list
-        agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent")
-        assert client.agents.list_agents().data.__len__() == list_length + 2
-        assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id
-
-        # delete agents and check list
-        client.agents.delete_agent(agent.id)
-        assert client.agents.list_agents().data.__len__() == list_length + 1
-        assert client.agents.list_agents().data[0].id == agent2.id
-
-        client.agents.delete_agent(agent2.id)
-        assert client.agents.list_agents().data.__len__() == list_length
-        print("Deleted agents")
-
-        # close client
-        client.close()
-    """
-
-    # **********************************************************************************
-    #
-    #            HAPPY PATH SERVICE TESTS - Thread APIs
-    #
-    # **********************************************************************************
-
-    # test creating thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_thread(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        # assert isinstance(thread, agentThread) TODO finish this! need to import agentThread from _models
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test getting thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_get_thread(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # get thread
-        thread2 = client.agents.get_thread(thread.id)
-        assert thread2.id
-        assert thread.id == thread2.id
-        print("Got thread, thread ID", thread2.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    """
-    TODO what can I update a thread with?
-    # test updating thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_update_thread(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # update thread
-        client.agents.update_thread(thread.id, )  # TODO what can we update it with?
-        assert not thread
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    """
-    # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working
-    # status_code = 404, response = <...>
-    # error_map = {304: <ResourceNotModifiedError>, 401: <ClientAuthenticationError>, 409: <ResourceExistsError>}
-
-    # test deleting thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_delete_thread(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        # assert isinstance(thread, agentThread) TODO finish this! need to import agentThread from _models
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # delete thread
-        deletion_status = client.agents.delete_thread(thread.id)
-        # assert not thread
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    # # **********************************************************************************
-    # #
-    # #            HAPPY PATH SERVICE TESTS - Message APIs
-    # #
-    # # **********************************************************************************
-
-    # test creating message in a thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_message(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test creating multiple messages in a thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_multiple_messages(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create messages
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message.id
-        print("Created message, message ID", message.id)
-        message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke")
-        assert message2.id
-        print("Created message, message ID", message2.id)
-        message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke")
-        assert message3.id
-        print("Created message, message ID", message3.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test listing messages in a thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_list_messages(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # check that initial message list is empty
-        messages0 = client.agents.list_messages(thread_id=thread.id)
-        print(messages0.data)
-        assert messages0.data.__len__() == 0
-
-        # create messages and check message list for each one
-        message1 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message1.id
-        print("Created message, message ID", message1.id)
-        messages1 = client.agents.list_messages(thread_id=thread.id)
-        assert messages1.data.__len__() == 1
-        assert messages1.data[0].id == message1.id
-
-        message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke")
-        assert message2.id
-        print("Created message, message ID", message2.id)
-        messages2 = client.agents.list_messages(thread_id=thread.id)
-        assert messages2.data.__len__() == 2
-        assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id
-
-        message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke")
-        assert message3.id
-        print("Created message, message ID", message3.id)
-        messages3 = client.agents.list_messages(thread_id=thread.id)
-        assert messages3.data.__len__() == 3
-        assert (
-            messages3.data[0].id == message3.id
-            or messages3.data[1].id == message3.id
-            or messages3.data[2].id == message3.id
-        )
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test getting message in a thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_get_message(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # get message
-        message2 = client.agents.get_message(thread_id=thread.id, message_id=message.id)
-        assert message2.id
-        assert message.id == message2.id
-        print("Got message, message ID", message.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    """
-    TODO format the updated body
-    # test updating message in a thread
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_update_message(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # update message
-        body_json = json.dumps  # TODO format body into json -- figure out what the message looks like so I can update it (might be in that picture)
-        client.agents.update_message(thread_id=thread.id, message_id=message.id, body=)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    # # **********************************************************************************
-    # #
-    # #            HAPPY PATH SERVICE TESTS - Run APIs
-    # #
-    # # **********************************************************************************
-
-    # test creating run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_run(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test getting run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_get_run(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # get run
-        run2 = client.agents.get_run(thread_id=thread.id, run_id=run.id)
-        assert run2.id
-        assert run.id == run2.id
-        print("Got run, run ID", run2.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # TODO fix - sometimes it works and sometimes it doesn't
-    # test successful run status; TODO test for cancelled/unsuccessful runs
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_run_status(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # check status
-        assert run.status in [
-            "queued",
-            "in_progress",
-            "requires_action",
-            "cancelling",
-            "cancelled",
-            "failed",
-            "completed",
-            "expired",
-        ]
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            # wait for a second
-            time.sleep(1)
-            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
-            print("Run status:", run.status)
-
-        assert run.status in ["cancelled", "failed", "completed", "expired"]
-        print("Run completed with status:", run.status)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
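-
-    # The polling pattern above could be factored into a helper along these lines
-    # (an illustrative sketch only; 'timeout_sec' is a made-up parameter, not an SDK feature):
-    #
-    #     def _wait_for_terminal_state(self, client, thread_id, run_id, timeout_sec=60):
-    #         deadline = time.time() + timeout_sec
-    #         run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
-    #         while run.status in ["queued", "in_progress", "requires_action"] and time.time() < deadline:
-    #             time.sleep(1)
-    #             run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
-    #         return run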
-
-    """
-    # TODO another, but check that the number of runs decreases after cancelling runs
-    # TODO can each thread only support one run?
-    # test listing runs
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_list_runs(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # check list for current runs
-        runs0 = client.agents.list_runs(thread_id=thread.id)
-        assert runs0.data.__len__() == 0
-
-        # create run and check list
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-        runs1 = client.agents.list_runs(thread_id=thread.id)
-        assert runs1.data.__len__() == 1
-        assert runs1.data[0].id == run.id
-
-        # create second run
-        run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run2.id
-        print("Created run, run ID", run2.id)
-        runs2 = client.agents.list_runs(thread_id=thread.id)
-        assert runs2.data.__len__() == 2
-        assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    """
-    # TODO figure out what to update the run with
-    # test updating run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_update_run(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # update run
-        body = json.dumps({'todo': 'placeholder'})
-        client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    # test submitting tool outputs to run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_submit_tool_outputs_to_run(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # Initialize agent tools
-        functions = FunctionTool(user_functions_recording)
-        code_interpreter = CodeInterpreterTool()
-
-        toolset = ToolSet()
-        toolset.add(functions)
-        toolset.add(code_interpreter)
-
-        # create agent
-        agent = client.agents.create_agent(
-            model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset
-        )
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # check that tools are uploaded
-        assert run.tools
-        assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
-        print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
-
-        # check status
-        assert run.status in [
-            "queued",
-            "in_progress",
-            "requires_action",
-            "cancelling",
-            "cancelled",
-            "failed",
-            "completed",
-            "expired",
-        ]
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            time.sleep(1)
-            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
-            # check if tools are needed
-            if run.status == "requires_action" and run.required_action.submit_tool_outputs:
-                print("Requires action: submit tool outputs")
-                tool_calls = run.required_action.submit_tool_outputs.tool_calls
-                if not tool_calls:
-                    print(
-                        "No tool calls provided - cancelling run"
-                    )  # TODO how can I make sure that it wants tools? should I have some kind of error message?
-                    client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
-                    break
-
-                # submit tool outputs to run
-                tool_outputs = toolset.execute_tool_calls(tool_calls)  # TODO issue somewhere here
-                print("Tool outputs:", tool_outputs)
-                if tool_outputs:
-                    client.agents.submit_tool_outputs_to_run(
-                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
-                    )
-
-            print("Current run status:", run.status)
-
-        print("Run completed with status:", run.status)
-
-        # check that messages used the tool
-        messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id)
-        tool_message = messages["data"][0]["content"][0]["text"]["value"]
-        hour24 = time.strftime("%H")
-        hour12 = time.strftime("%I")
-        minute = time.strftime("%M")
-        assert hour24 + ":" + minute in tool_message or hour12 + ":" + minute in tool_message
-        print("Used tool_outputs")
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    """
-    # DISABLED: rewrite to ensure run is not complete when cancel_run is called
-    # test cancelling run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_cancel_run(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread
-        thread = client.agents.create_thread()
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # create message
-        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        # check status and cancel
-        assert run.status in ["queued", "in_progress", "requires_action"]
-        client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
-
-        while run.status in ["queued", "cancelling"]:
-            time.sleep(1)
-            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
-            print("Current run status:", run.status)
-        assert run.status == "cancelled"
-        print("Run cancelled")
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-    """
-
-    # test create thread and run
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_thread_and_run(self, **kwargs):
-        time.sleep(26)
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AzureAIClient)
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # create thread and run
-        run = client.agents.create_thread_and_run(assistant_id=agent.id)
-        assert run.id
-        assert run.thread_id
-        print("Created run, run ID", run.id)
-
-        # get thread
-        thread = client.agents.get_thread(run.thread_id)
-        assert thread.id
-        print("Created thread, thread ID", thread.id)
-
-        # check status
-        assert run.status in [
-            "queued",
-            "in_progress",
-            "requires_action",
-            "cancelling",
-            "cancelled",
-            "failed",
-            "completed",
-            "expired",
-        ]
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            # wait for a second
-            time.sleep(1)
-            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
["queued", "in_progress", "requires_action", "completed"] - print("Run status:", run.status) - - assert run.status == "completed" - print("Run completed") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test listing run steps - @agentClientPreparer() - @recorded_by_proxy - def test_list_run_step(self, **kwargs): - - time.sleep(50) - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AzureAIClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - # commenting assertion out below, do we know exactly when run starts? - # assert steps['data'].__len__() == 0 - - # check status - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - print("Run status:", run.status) - steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - assert steps["data"].__len__() > 0 # TODO what else should we look at? - - assert run.status == "completed" - print("Run completed") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test getting run step - # TODO where are step ids from - @agentClientPreparer() - @recorded_by_proxy - def test_get_run_step(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AzureAIClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
-        assert message.id
-        print("Created message, message ID", message.id)
-
-        # create run
-        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-        assert run.id
-        print("Created run, run ID", run.id)
-
-        if run.status == "failed":
-            assert run.last_error
-            print(run.last_error)
-            print("FAILED HERE")
-
-        # check status
-        assert run.status in ["queued", "in_progress", "requires_action", "completed"]
-        while run.status in ["queued", "in_progress", "requires_action"]:
-            # wait for a second
-            time.sleep(1)
-            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
-            if run.status == "failed":
-                assert run.last_error
-                print(run.last_error)
-                print("FAILED HERE")
-            assert run.status in ["queued", "in_progress", "requires_action", "completed"]
-            print("Run status:", run.status)
-
-        # list steps, check that get_run_step works with first step_id
-        steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
-        assert steps["data"].__len__() > 0
-        step = steps["data"][0]
-        get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id)
-        assert step == get_step
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # # **********************************************************************************
-    # #
-    # #            HAPPY PATH SERVICE TESTS - Streaming APIs
-    # #
-    # # **********************************************************************************
-
-    # # **********************************************************************************
-    # #
-    # #            NEGATIVE TESTS - TODO idk what goes here
-    # #
-    # # **********************************************************************************
-
-    """
-    # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors
-    # test agent creation and deletion
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_negative_create_delete_agent(self, **kwargs):
-        # create client using bad endpoint
-        bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
-
-        credential = self.get_credential(AzureAIClient, is_async=False)
-        client = AzureAIClient.from_connection_string(
-            credential=credential,
-            connection=bad_connection_string,
-        )
-
-        # attempt to create agent with bad client
-        exception_caught = False
-        try:
-            agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        # check for error (will not have a status code since it failed on request -- no response was received)
-        except (ServiceRequestError, HttpResponseError) as e:
-            exception_caught = True
-            if type(e) == ServiceRequestError:
-                assert e.message
-                assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
-            else:
-                assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e)
-
-        # close client and confirm an exception was caught
-        client.close()
-        assert exception_caught
-    """
diff --git a/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py
deleted file mode 100644
index d42b1db75120..000000000000
--- a/sdk/ai/azure-ai-client/tests/agents/test_deserialization.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# # ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import copy
-import datetime
-import pytest
-
-from azure.ai.client.models._models import ThreadRun, RunStep, ThreadMessage
-from azure.ai.client.models._patch import _safe_instantiate, _filter_parameters
-
-
-class TestDeserialization:
-    """Tests for deserialization of SSE responses."""
-
-    @pytest.mark.parametrize(
-        "valid_params,model_cls",
-        [
-            (
-                {
-                    "id": "12345",
-                    "object": "thread.run",
-                    "thread_id": "6789",
-                    "assistant_id": "101112",
-                    "status": "in_progress",
-                    "required_action": "test",
-                    "last_error": "none",
-                    "model": "gpt-4",
-                    "instructions": "Test instruction",
-                    "tools": "Test function",
-                    "created_at": datetime.datetime(2024, 11, 14),
-                    "expires_at": datetime.datetime(2024, 11, 17),
-                    "started_at": datetime.datetime(2024, 11, 15),
-                    "completed_at": datetime.datetime(2024, 11, 16),
-                    "cancelled_at": datetime.datetime(2024, 11, 16),
-                    "failed_at": datetime.datetime(2024, 11, 16),
-                    "incomplete_details": "max_completion_tokens",
-                    "usage": "in_progress",
-                    "temperature": 1.0,
-                    "top_p": 1.0,
-                    "max_completion_tokens": 1000,
-                    "truncation_strategy": "test",
-                    "tool_choice": "tool name",
-                    "response_format": "json",
-                    "metadata": {"foo": "bar"},
-                    "tool_resources": "test",
-                    "parallel_tool_calls": True,
-                },
-                ThreadRun,
-            ),
-            (
-                {
-                    "id": "1233",
-                    "object": "thread.message",
-                    "created_at": datetime.datetime(2024, 11, 14),
-                    "thread_id": "5678",
-                    "status": "incomplete",
-                    "incomplete_details": "test",
-                    "completed_at": datetime.datetime(2024, 11, 16),
-                    "incomplete_at": datetime.datetime(2024, 11, 16),
-                    "role": "assistant",
-                    "content": "Test",
-                    "assistant_id": "9911",
-                    "run_id": "11",
-                    "attachments": ["4", "8", "15", "16", "23", "42"],
-                    "metadata": {"foo": "bar"},
-                },
-                ThreadMessage,
-            ),
-        ],
-    )
-    def test_correct_thread_params(self, valid_params, model_cls):
-        """Test that an extra parameter returned by the service in an SSE response does not cause issues."""
-
-        bad_params = {"foo": "bar"}
-        params = copy.deepcopy(valid_params)
-        params.update(bad_params)
-        # We should not be able to create a ThreadRun with bad parameters.
-        with pytest.raises(TypeError):
-            model_cls(**params)
-        filtered_params = _filter_parameters(model_cls, params)
-        for k in valid_params:
-            assert k in filtered_params
-        for k in bad_params:
-            assert k not in filtered_params
-        # Implicitly check that we can create the object with the filtered parameters.
-        model_cls(**filtered_params)
-        # Check safe initialization.
-        assert isinstance(_safe_instantiate(model_cls, params), model_cls)
-
-    def test_safe_instantiate_non_dict(self):
-        """Test the _safe_instantiate method when the user supplies something that is not a dictionary."""
-        assert _safe_instantiate(RunStep, 42) == 42
diff --git a/sdk/ai/azure-ai-client/tests/conftest.py b/sdk/ai/azure-ai-client/tests/conftest.py
deleted file mode 100644
index d944cdf86007..000000000000
--- a/sdk/ai/azure-ai-client/tests/conftest.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-import pytest
-from devtools_testutils import test_proxy, remove_batch_sanitizers
-
-
-# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method
-@pytest.fixture(scope="session", autouse=True)
-def start_proxy(test_proxy):
-    return
-
-
-@pytest.fixture(scope="session", autouse=True)
-def add_sanitizers(test_proxy):
-    # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive:
-    #  - AZSDK3493: $..name
-    remove_batch_sanitizers(["AZSDK3493"])
diff --git a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py b/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
deleted file mode 100644
index 341224aae4e5..000000000000
--- a/sdk/ai/azure-ai-client/tests/endpoints/unit_tests.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import sys
-import logging
-import datetime
-from azure.ai.client.models import SASTokenCredential
-from azure.core.credentials import TokenCredential, AccessToken
-from azure.core.exceptions import HttpResponseError
-
-# import azure.ai.client as sdk
-
-# Set to True to enable SDK logging
-LOGGING_ENABLED = True
-
-if LOGGING_ENABLED:
-    # Create a logger for the 'azure' SDK
-    # See https://docs.python.org/3/library/logging.html
-    logger = logging.getLogger("azure")
-    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
-
-    # Configure a console output
-    handler = logging.StreamHandler(stream=sys.stdout)
-    logger.addHandler(handler)
-
-
-class FakeTokenCredential(TokenCredential):
-    def get_token(self, *scopes, **kwargs):
-        # Create a fake token with an expiration time
-        token = "fake_token"
-        expires_on = datetime.datetime.now() + datetime.timedelta(hours=1)
-        return AccessToken(token, expires_on.timestamp())
-
-
-# The test class name needs to start with "Test" to get collected by pytest
-class TestUnit:
-
-    # **********************************************************************************
-    #
-    #                               UNIT TESTS
-    #
-    # **********************************************************************************
-
-    def test_sas_token_credential_class_mocked(self, **kwargs):
-        import jwt
-        import datetime
-        import time
-
-        # Create a simple JWT with a 5-second expiration time
-        token_duration_sec = 5
-        secret_key = "my_secret_key"
-        sas_token_expiration: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
-            seconds=token_duration_sec
-        )
-        sas_token_expiration = sas_token_expiration.replace(microsecond=0)
-        payload = {"exp": sas_token_expiration}
-        sas_token = jwt.encode(payload, secret_key)
-
-        # You can parse the token string on https://jwt.ms/. The "exp" value there is the
-        # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC).
-        # See https://www.epochconverter.com/ to convert Unix time to a readable date & time.
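-        # For reference, the expiry claim can also be read back locally (a sketch using the
-        # same 'jwt' (PyJWT) package imported above; signature verification is skipped on purpose):
-        #
-        #     decoded = jwt.decode(sas_token, options={"verify_signature": False})
-        #     print(decoded["exp"])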
-        # The base64 decoded string will look something like this:
-        # {
-        #  "alg": "HS256",
-        #  "typ": "JWT"
-        # }.{
-        #  "exp": 1727208894
-        # }.[Signature]
-        print(f"Generated JWT token: {sas_token}")
-
-        sas_token_credential = SASTokenCredential(
-            sas_token=sas_token,
-            credential=FakeTokenCredential(),
-            subscription_id="fake_subscription_id",
-            resource_group_name="fake_resource_group",
-            project_name="fake_project_name",
-            connection_name="fake_connection_name",
-        )
-        assert sas_token_credential._expires_on == sas_token_expiration
-
-        exception_caught = False
-        try:
-            for _ in range(token_duration_sec + 2):
-                print("Looping...")
-                time.sleep(1)
-                access_token = sas_token_credential.get_token()
-        except HttpResponseError as e:
-            exception_caught = True
-            print(e)
-        assert exception_caught
-
-    # Unit test for the SASTokenCredential class
-    def test_sas_token_credential_class_real(self, **kwargs):
-
-        # Example of a real SAS token for the AOAI service. You can parse it on https://jwt.ms/. The "exp" value there is the
-        # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC).
-        token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ"
-        expiration_date_linux_time = 1726782246  # Value of the "exp" field in the token. See https://www.epochconverter.com/ to convert to date & time
-        expiration_datetime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc)
-        print(f"\n[TEST] Expected expiration date: {expiration_datetime_utc}")
-
-        sas_token_credential = SASTokenCredential(
-            sas_token=token,
-            credential=None,
-            subscription_id=None,
-            resource_group_name=None,
-            project_name=None,
-            connection_name=None,
-        )
-
-        print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}")
-        assert sas_token_credential._expires_on == expiration_datetime_utc
diff --git a/sdk/ai/azure-ai-client/tsp-location.yaml b/sdk/ai/azure-ai-client/tsp-location.yaml
deleted file mode 100644
index 4b3b80fb4e8a..000000000000
--- a/sdk/ai/azure-ai-client/tsp-location.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-directory: specification/ai/Azure.AI.Client
-commit: 0c04c10b19c71ca88bcfe42015d4de9ad027764b
-repo: Azure/azure-rest-api-specs
-additionalDirectories:
diff --git a/sdk/ai/azure-ai-project/CHANGELOG.md b/sdk/ai/azure-ai-project/CHANGELOG.md
deleted file mode 100644
index 628743d283a9..000000000000
--- a/sdk/ai/azure-ai-project/CHANGELOG.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Release History
-
-## 1.0.0b1 (1970-01-01)
-
-- Initial version
diff --git a/sdk/ai/azure-ai-project/LICENSE b/sdk/ai/azure-ai-project/LICENSE
deleted file mode 100644
index 63447fd8bbbf..000000000000
--- a/sdk/ai/azure-ai-project/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (c) Microsoft Corporation.
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/MANIFEST.in b/sdk/ai/azure-ai-project/MANIFEST.in
deleted file mode 100644
index 1486c4804328..000000000000
--- a/sdk/ai/azure-ai-project/MANIFEST.in
+++ /dev/null
@@ -1,7 +0,0 @@
-include *.md
-include LICENSE
-include azure/ai/project/py.typed
-recursive-include tests *.py
-recursive-include samples *.py *.md
-include azure/__init__.py
-include azure/ai/__init__.py
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/README.md b/sdk/ai/azure-ai-project/README.md
deleted file mode 100644
index 1076c7ef0670..000000000000
--- a/sdk/ai/azure-ai-project/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-# Azure AI Project client library for Python
-
-
-## Getting started
-
-### Install the package
-
-```bash
-python -m pip install azure-ai-project
-```
-
-#### Prerequisites
-
-- Python 3.8 or later is required to use this package.
-- You need an [Azure subscription][azure_sub] to use this package.
-- An existing Azure AI Project instance.
-#### Create with an Azure Active Directory Credential
-To use an [Azure Active Directory (AAD) token credential][authenticate_with_token],
-provide an instance of the desired credential type obtained from the
-[azure-identity][azure_identity_credentials] library.
-
-To authenticate with AAD, you must first [pip][pip] install [`azure-identity`][azure_identity_pip].
-
-After setup, you can choose which type of [credential][azure_identity_credentials] from azure.identity to use.
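-For instance, if you would rather configure a service principal explicitly than rely on
-environment-based discovery, a `ClientSecretCredential` can be constructed directly. This is
-a minimal sketch; the placeholder values below are illustrative, not real identifiers:
-
-```python
->>> from azure.identity import ClientSecretCredential
->>> # Placeholder values for illustration only; use your AAD application's
->>> # actual tenant ID, client ID, and client secret.
->>> credential = ClientSecretCredential(
-...     tenant_id='<tenant-id>',
-...     client_id='<client-id>',
-...     client_secret='<client-secret>',
-... )
-```
-
-Any credential type from azure.identity that implements the `TokenCredential` protocol can be
-passed to the client in the same way.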
-As an example, [DefaultAzureCredential][default_azure_credential] can be used to authenticate the client:
-
-Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables:
-`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`
-
-Use the returned token credential to authenticate the client:
-
-```python
->>> from azure.ai.project import AIProjectClient
->>> from azure.identity import DefaultAzureCredential
->>> client = AIProjectClient(endpoint='', credential=DefaultAzureCredential())
-```
-
-## Examples
-
-```python
->>> from azure.ai.project import AIProjectClient
->>> from azure.identity import DefaultAzureCredential
->>> from azure.core.exceptions import HttpResponseError
-
->>> client = AIProjectClient(endpoint='', credential=DefaultAzureCredential())
->>> try:
-...     pass  # placeholder: call client operations here
-... except HttpResponseError as e:
-...     print('service responds with error: {}'.format(e.response.json()))
-
-```
-
-## Contributing
-
-This project welcomes contributions and suggestions. Most contributions require
-you to agree to a Contributor License Agreement (CLA) declaring that you have
-the right to, and actually do, grant us the rights to use your contribution.
-For details, visit https://cla.microsoft.com.
-
-When you submit a pull request, a CLA-bot will automatically determine whether
-you need to provide a CLA and decorate the PR appropriately (e.g., label,
-comment). Simply follow the instructions provided by the bot. You will only
-need to do this once across all repos using our CLA.
-
-This project has adopted the
-[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
-see the Code of Conduct FAQ or contact opencode@microsoft.com with any
-additional questions or comments.
-
-
-[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/
-[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token
-[azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials
-[azure_identity_pip]: https://pypi.org/project/azure-identity/
-[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential
-[pip]: https://pypi.org/project/pip/
-[azure_sub]: https://azure.microsoft.com/free/
-
diff --git a/sdk/ai/azure-ai-project/azure/__init__.py b/sdk/ai/azure-ai-project/azure/__init__.py
deleted file mode 100644
index d55ccad1f573..000000000000
--- a/sdk/ai/azure-ai-project/azure/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
diff --git a/sdk/ai/azure-ai-project/azure/ai/__init__.py b/sdk/ai/azure-ai-project/azure/ai/__init__.py
deleted file mode 100644
index d55ccad1f573..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/__init__.py
deleted file mode 100644
index 743119593f69..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/project/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._client import AIProjectClient # type: ignore -from ._version import VERSION - -__version__ = VERSION - -try: - from ._patch import __all__ as _patch_all - from ._patch import * -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AIProjectClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore - -_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_client.py b/sdk/ai/azure-ai-project/azure/ai/project/_client.py deleted file mode 100644 index 84a19d80cde9..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_client.py +++ /dev/null @@ -1,137 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, TYPE_CHECKING -from typing_extensions import Self - -from azure.core import PipelineClient -from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse - -from ._configuration import AIProjectClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - - -class AIProjectClient: - """AIProjectClient. - - :ivar agents: AgentsOperations operations - :vartype agents: azure.ai.project.operations.AgentsOperations - :ivar connections: ConnectionsOperations operations - :vartype connections: azure.ai.project.operations.ConnectionsOperations - :ivar evaluations: EvaluationsOperations operations - :vartype evaluations: azure.ai.project.operations.EvaluationsOperations - :param endpoint: The Azure AI Studio project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``\\\\ , where - :code:`` is the Azure region where the project is deployed (e.g. westus) and - :code:`` is the GUID of the Enterprise private link. Required. - :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Studio project name. Required. - :type project_name: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-07-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long - self._config = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) - - def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> Self: - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py b/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py deleted file mode 100644 index f39a4dfc1f76..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_configuration.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline import policies - -from ._version import VERSION - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - - -class AIProjectClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for AIProjectClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: The Azure AI Studio project endpoint, in the form - ``https://.api.azureml.ms`` or - ``https://..api.azureml.ms``\\ , where :code:`` - is the Azure region where the project is deployed (e.g. westus) and :code:`` - is the GUID of the Enterprise private link. Required. - :type endpoint: str - :param subscription_id: The Azure subscription ID. Required. - :type subscription_id: str - :param resource_group_name: The name of the Azure Resource Group. Required. - :type resource_group_name: str - :param project_name: The Azure AI Studio project name. Required. - :type project_name: str - :param credential: Credential used to authenticate requests to the service. Required. - :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is - "2024-07-01-preview". Note that overriding this default value may result in unsupported - behavior. 
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "TokenCredential", - **kwargs: Any - ) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "ai-project/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.BearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py b/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py deleted file mode 100644 index e6a2730f9276..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_model_base.py +++ /dev/null @@ -1,1159 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# --------------------------------------------------------------------------
-# pylint: disable=protected-access, broad-except
-
-import copy
-import calendar
-import decimal
-import functools
-import sys
-import logging
-import base64
-import re
-import typing
-import enum
-import email.utils
-from datetime import datetime, date, time, timedelta, timezone
-from json import JSONEncoder
-import xml.etree.ElementTree as ET
-from typing_extensions import Self
-import isodate
-from azure.core.exceptions import DeserializationError
-from azure.core import CaseInsensitiveEnumMeta
-from azure.core.pipeline import PipelineResponse
-from azure.core.serialization import _Null
-
-if sys.version_info >= (3, 9):
-    from collections.abc import MutableMapping
-else:
-    from typing import MutableMapping
-
-_LOGGER = logging.getLogger(__name__)
-
-__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"]
-
-TZ_UTC = timezone.utc
-_T = typing.TypeVar("_T")
-
-
-def _timedelta_as_isostr(td: timedelta) -> str:
-    """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
-
-    Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
-
-    :param timedelta td: The timedelta to convert
-    :rtype: str
-    :return: ISO8601 version of this timedelta
-    """
-
-    # Split seconds to larger units
-    seconds = td.total_seconds()
-    minutes, seconds = divmod(seconds, 60)
-    hours, minutes = divmod(minutes, 60)
-    days, hours = divmod(hours, 24)
-
-    days, hours, minutes = list(map(int, (days, hours, minutes)))
-    seconds = round(seconds, 6)
-
-    # Build date
-    date_str = ""
-    if days:
-        date_str = "%sD" % days
-
-    if hours or minutes or seconds:
-        # Build time
-        time_str = "T"
-
-        # Hours
-        bigger_exists = date_str or hours
-        if bigger_exists:
-            time_str += "{:02}H".format(hours)
-
-        # Minutes
-        bigger_exists = bigger_exists or minutes
-        if bigger_exists:
-            time_str += "{:02}M".format(minutes)
-
-        # Seconds
-        try:
-            if seconds.is_integer():
-                seconds_string = "{:02}".format(int(seconds))
-            else:
-                # 9 chars long w/ leading 0, 6 digits after decimal
-                seconds_string = "%09.6f" % seconds
-                # Remove trailing zeros
-                seconds_string = seconds_string.rstrip("0")
-        except AttributeError:  # int.is_integer() raises
-            seconds_string = "{:02}".format(seconds)
-
-        time_str += "{}S".format(seconds_string)
-    else:
-        time_str = ""
-
-    return "P" + date_str + time_str
-
-
-def _serialize_bytes(o, format: typing.Optional[str] = None) -> str:
-    encoded = base64.b64encode(o).decode()
-    if format == "base64url":
-        return encoded.strip("=").replace("+", "-").replace("/", "_")
-    return encoded
-
-
-def _serialize_datetime(o, format: typing.Optional[str] = None):
-    if hasattr(o, "year") and hasattr(o, "hour"):
-        if format == "rfc7231":
-            return email.utils.format_datetime(o, usegmt=True)
-        if format == "unix-timestamp":
-            return int(calendar.timegm(o.utctimetuple()))
-
-        # astimezone() fails for naive times in Python 2.7, so make sure o is aware (tzinfo is set)
-        if not o.tzinfo:
-            iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
-        else:
-            iso_formatted = o.astimezone(TZ_UTC).isoformat()
-        # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
-        return iso_formatted.replace("+00:00", "Z")
-    # Next try datetime.date or datetime.time
-    return o.isoformat()
-
-
-def _is_readonly(p):
-    try:
-        return p._visibility == ["read"]
-    except AttributeError:
-        return False
-
-
-class SdkJSONEncoder(JSONEncoder):
-    """A JSON 
encoder that's capable of serializing datetime objects and bytes.""" - - def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): - super().__init__(*args, **kwargs) - self.exclude_readonly = exclude_readonly - self.format = format - - def default(self, o): # pylint: disable=too-many-return-statements - if _is_model(o): - if self.exclude_readonly: - readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] - return {k: v for k, v in o.items() if k not in readonly_props} - return dict(o.items()) - try: - return super(SdkJSONEncoder, self).default(o) - except TypeError: - if isinstance(o, _Null): - return None - if isinstance(o, decimal.Decimal): - return float(o) - if isinstance(o, (bytes, bytearray)): - return _serialize_bytes(o, self.format) - try: - # First try datetime.datetime - return _serialize_datetime(o, self.format) - except AttributeError: - pass - # Last, try datetime.timedelta - try: - return _timedelta_as_isostr(o) - except AttributeError: - # This will be raised when it hits value.total_seconds in the method above - pass - return super(SdkJSONEncoder, self).default(o) - - -_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") -_VALID_RFC7231 = re.compile( - r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" - r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" -) - - -def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: - """Deserialize ISO-8601 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - attr = attr.upper() - match = _VALID_DATE.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - check_decimal = attr.split(".") - if len(check_decimal) > 1: - decimal_str = "" - for digit in check_decimal[1]: - if digit.isdigit(): - decimal_str += digit - else: - break - if len(decimal_str) > 6: - attr = attr.replace(decimal_str, decimal_str[0:6]) - - date_obj = isodate.parse_datetime(attr) - test_utc = date_obj.utctimetuple() - if test_utc.tm_year > 9999 or test_utc.tm_year < 1: - raise OverflowError("Hit max or min date") - return date_obj - - -def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: - """Deserialize RFC7231 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - match = _VALID_RFC7231.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - return email.utils.parsedate_to_datetime(attr) - - -def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: - """Deserialize unix timestamp into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: ~datetime.datetime - :returns: The datetime object from that input - """ - if isinstance(attr, datetime): - # i'm already deserialized - return attr - return datetime.fromtimestamp(attr, TZ_UTC) - - -def _deserialize_date(attr: typing.Union[str, date]) -> date: - """Deserialize ISO-8601 formatted string into Date object. - :param str attr: response string to be deserialized. 
- :rtype: date - :returns: The date object from that input - """ - # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. - if isinstance(attr, date): - return attr - return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore - - -def _deserialize_time(attr: typing.Union[str, time]) -> time: - """Deserialize ISO-8601 formatted string into time object. - - :param str attr: response string to be deserialized. - :rtype: datetime.time - :returns: The time object from that input - """ - if isinstance(attr, time): - return attr - return isodate.parse_time(attr) - - -def _deserialize_bytes(attr): - if isinstance(attr, (bytes, bytearray)): - return attr - return bytes(base64.b64decode(attr)) - - -def _deserialize_bytes_base64(attr): - if isinstance(attr, (bytes, bytearray)): - return attr - padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore - attr = attr + padding # type: ignore - encoded = attr.replace("-", "+").replace("_", "/") - return bytes(base64.b64decode(encoded)) - - -def _deserialize_duration(attr): - if isinstance(attr, timedelta): - return attr - return isodate.parse_duration(attr) - - -def _deserialize_decimal(attr): - if isinstance(attr, decimal.Decimal): - return attr - return decimal.Decimal(str(attr)) - - -def _deserialize_int_as_str(attr): - if isinstance(attr, int): - return attr - return int(attr) - - -_DESERIALIZE_MAPPING = { - datetime: _deserialize_datetime, - date: _deserialize_date, - time: _deserialize_time, - bytes: _deserialize_bytes, - bytearray: _deserialize_bytes, - timedelta: _deserialize_duration, - typing.Any: lambda x: x, - decimal.Decimal: _deserialize_decimal, -} - -_DESERIALIZE_MAPPING_WITHFORMAT = { - "rfc3339": _deserialize_datetime, - "rfc7231": _deserialize_datetime_rfc7231, - "unix-timestamp": _deserialize_datetime_unix_timestamp, - "base64": _deserialize_bytes, - "base64url": _deserialize_bytes_base64, -} - - -def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): - if annotation is int and rf and rf._format == "str": - return _deserialize_int_as_str - if rf and rf._format: - return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) - return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore - - -def _get_type_alias_type(module_name: str, alias_name: str): - types = { - k: v - for k, v in sys.modules[module_name].__dict__.items() - if isinstance(v, typing._GenericAlias) # type: ignore - } - if alias_name not in types: - return alias_name - return types[alias_name] - - -def _get_model(module_name: str, model_name: str): - models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} - module_end = module_name.rsplit(".", 1)[0] - models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) - if isinstance(model_name, str): - model_name = model_name.split(".")[-1] - if model_name not in models: - return model_name - return models[model_name] - - -_UNSET = object() - - -class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object - def __init__(self, data: typing.Dict[str, typing.Any]) -> None: - self._data = data - - def __contains__(self, key: typing.Any) -> bool: - return key in self._data - - def __getitem__(self, key: str) -> typing.Any: - return self._data.__getitem__(key) - - def __setitem__(self, key: str, value: typing.Any) -> None: - self._data.__setitem__(key, value) - - def __delitem__(self, key: str) -> None: - self._data.__delitem__(key) - - 
def __iter__(self) -> typing.Iterator[typing.Any]: - return self._data.__iter__() - - def __len__(self) -> int: - return self._data.__len__() - - def __ne__(self, other: typing.Any) -> bool: - return not self.__eq__(other) - - def keys(self) -> typing.KeysView[str]: - return self._data.keys() - - def values(self) -> typing.ValuesView[typing.Any]: - return self._data.values() - - def items(self) -> typing.ItemsView[str, typing.Any]: - return self._data.items() - - def get(self, key: str, default: typing.Any = None) -> typing.Any: - try: - return self[key] - except KeyError: - return default - - @typing.overload - def pop(self, key: str) -> typing.Any: ... - - @typing.overload - def pop(self, key: str, default: _T) -> _T: ... - - @typing.overload - def pop(self, key: str, default: typing.Any) -> typing.Any: ... - - def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: - if default is _UNSET: - return self._data.pop(key) - return self._data.pop(key, default) - - def popitem(self) -> typing.Tuple[str, typing.Any]: - return self._data.popitem() - - def clear(self) -> None: - self._data.clear() - - def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: - self._data.update(*args, **kwargs) - - @typing.overload - def setdefault(self, key: str, default: None = None) -> None: ... - - @typing.overload - def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... - - def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: - if default is _UNSET: - return self._data.setdefault(key) - return self._data.setdefault(key, default) - - def __eq__(self, other: typing.Any) -> bool: - try: - other_model = self.__class__(other) - except Exception: - return False - return self._data == other_model._data - - def __repr__(self) -> str: - return str(self._data) - - -def _is_model(obj: typing.Any) -> bool: - return getattr(obj, "_is_model", False) - - -def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements - if isinstance(o, list): - return [_serialize(x, format) for x in o] - if isinstance(o, dict): - return {k: _serialize(v, format) for k, v in o.items()} - if isinstance(o, set): - return {_serialize(x, format) for x in o} - if isinstance(o, tuple): - return tuple(_serialize(x, format) for x in o) - if isinstance(o, (bytes, bytearray)): - return _serialize_bytes(o, format) - if isinstance(o, decimal.Decimal): - return float(o) - if isinstance(o, enum.Enum): - return o.value - if isinstance(o, int): - if format == "str": - return str(o) - return o - try: - # First try datetime.datetime - return _serialize_datetime(o, format) - except AttributeError: - pass - # Last, try datetime.timedelta - try: - return _timedelta_as_isostr(o) - except AttributeError: - # This will be raised when it hits value.total_seconds in the method above - pass - return o - - -def _get_rest_field( - attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str -) -> typing.Optional["_RestField"]: - try: - return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) - except StopIteration: - return None - - -def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: - if not rf: - return _serialize(value, None) - if rf._is_multipart_file_input: - return value - if rf._is_model: - return _deserialize(rf._type, value) - if isinstance(value, ET.Element): - value = _deserialize(rf._type, value) - return _serialize(value, rf._format) - - -class Model(_MyMutableMapping): - _is_model = True - 
# label whether the current class's _attr_to_rest_field has been calculated
-    # we cannot check _attr_to_rest_field directly because subclasses inherit it from the parent class
-    _calculated: typing.Set[str] = set()
-
-    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
-        class_name = self.__class__.__name__
-        if len(args) > 1:
-            raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given")
-        dict_to_pass = {
-            rest_field._rest_name: rest_field._default
-            for rest_field in self._attr_to_rest_field.values()
-            if rest_field._default is not _UNSET
-        }
-        if args:  # pylint: disable=too-many-nested-blocks
-            if isinstance(args[0], ET.Element):
-                existed_attr_keys = []
-                model_meta = getattr(self, "_xml", {})
-
-                for rf in self._attr_to_rest_field.values():
-                    prop_meta = getattr(rf, "_xml", {})
-                    xml_name = prop_meta.get("name", rf._rest_name)
-                    xml_ns = prop_meta.get("ns", model_meta.get("ns", None))
-                    if xml_ns:
-                        xml_name = "{" + xml_ns + "}" + xml_name
-
-                    # attribute
-                    if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None:
-                        existed_attr_keys.append(xml_name)
-                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name))
-                        continue
-
-                    # unwrapped element is array
-                    if prop_meta.get("unwrapped", False):
-                        # unwrapped array could either use prop items meta/prop meta
-                        if prop_meta.get("itemsName"):
-                            xml_name = prop_meta.get("itemsName")
-                            xml_ns = prop_meta.get("itemNs")
-                            if xml_ns:
-                                xml_name = "{" + xml_ns + "}" + xml_name
-                        items = args[0].findall(xml_name)  # pyright: ignore
-                        if len(items) > 0:
-                            existed_attr_keys.append(xml_name)
-                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, items)
-                        continue
-
-                    # text element is primitive type
-                    if prop_meta.get("text", False):
-                        if args[0].text is not None:
-                            dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text)
-                        continue
-
-                    # wrapped element could be normal property or array, it should only have one element
-                    item = args[0].find(xml_name)
-                    if item is not None:
-                        existed_attr_keys.append(xml_name)
-                        dict_to_pass[rf._rest_name] = _deserialize(rf._type, item)
-
-                # the remaining elements are additional properties
-                for e in args[0]:
-                    if e.tag not in existed_attr_keys:
-                        dict_to_pass[e.tag] = _convert_element(e)
-            else:
-                dict_to_pass.update(
-                    {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
-                )
-        else:
-            non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
-            if non_attr_kwargs:
-                # actual TypeErrors only report the first unexpected keyword argument, so we follow that behavior.
- raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") - dict_to_pass.update( - { - self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) - for k, v in kwargs.items() - if v is not None - } - ) - super().__init__(dict_to_pass) - - def copy(self) -> "Model": - return Model(self.__dict__) - - def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: - if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: - # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', - # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' - mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order - attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property - k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") - } - annotations = { - k: v - for mro_class in mros - if hasattr(mro_class, "__annotations__") - for k, v in mro_class.__annotations__.items() - } - for attr, rf in attr_to_rest_field.items(): - rf._module = cls.__module__ - if not rf._type: - rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) - if not rf._rest_name_input: - rf._rest_name_input = attr - cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) - cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") - - return super().__new__(cls) # pylint: disable=no-value-for-parameter - - def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: - for base in cls.__bases__: - if hasattr(base, "__mapping__"): - base.__mapping__[discriminator or cls.__name__] = cls # type: ignore - - @classmethod - def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: - for v in cls.__dict__.values(): - if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: - return v - return None - - @classmethod - def _deserialize(cls, data, exist_discriminators): - if not hasattr(cls, "__mapping__"): - return cls(data) - discriminator = cls._get_discriminator(exist_discriminators) - if discriminator is None: - return cls(data) - exist_discriminators.append(discriminator._rest_name) - if isinstance(data, ET.Element): - model_meta = getattr(cls, "_xml", {}) - prop_meta = getattr(discriminator, "_xml", {}) - xml_name = prop_meta.get("name", discriminator._rest_name) - xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) - if xml_ns: - xml_name = "{" + xml_ns + "}" + xml_name - - if data.get(xml_name) is not None: - discriminator_value = data.get(xml_name) - else: - discriminator_value = data.find(xml_name).text # pyright: ignore - else: - discriminator_value = data.get(discriminator._rest_name) - mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore - return mapped_cls._deserialize(data, exist_discriminators) - - def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: - """Return a dict that can be turned into json using json.dump. - - :keyword bool exclude_readonly: Whether to remove the readonly properties. 
- :returns: A dict JSON compatible object - :rtype: dict - """ - - result = {} - readonly_props = [] - if exclude_readonly: - readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] - for k, v in self.items(): - if exclude_readonly and k in readonly_props: # pyright: ignore - continue - is_multipart_file_input = False - try: - is_multipart_file_input = next( - rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k - )._is_multipart_file_input - except StopIteration: - pass - result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) - return result - - @staticmethod - def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: - if v is None or isinstance(v, _Null): - return None - if isinstance(v, (list, tuple, set)): - return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) - if isinstance(v, dict): - return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} - return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v - - -def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): - if _is_model(obj): - return obj - return _deserialize(model_deserializer, obj) - - -def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): - if obj is None: - return obj - return _deserialize_with_callable(if_obj_deserializer, obj) - - -def _deserialize_with_union(deserializers, obj): - for deserializer in deserializers: - try: - return _deserialize(deserializer, obj) - except DeserializationError: - pass - raise DeserializationError() - - -def _deserialize_dict( - value_deserializer: typing.Optional[typing.Callable], - module: typing.Optional[str], - obj: typing.Dict[typing.Any, typing.Any], -): - if obj is None: - return obj - if isinstance(obj, ET.Element): - obj = {child.tag: child for child in obj} - return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} - - -def _deserialize_multiple_sequence( - entry_deserializers: typing.List[typing.Optional[typing.Callable]], - module: typing.Optional[str], - obj, -): - if obj is None: - return obj - return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) - - -def _deserialize_sequence( - deserializer: typing.Optional[typing.Callable], - module: typing.Optional[str], - obj, -): - if obj is None: - return obj - if isinstance(obj, ET.Element): - obj = list(obj) - return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) - - -def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: - return sorted( - types, - key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), - ) - - -def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches - annotation: typing.Any, - module: typing.Optional[str], - rf: typing.Optional["_RestField"] = None, -) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: - if not annotation: - return None - - # is it a type alias? - if isinstance(annotation, str): - if module is not None: - annotation = _get_type_alias_type(module, annotation) - - # is it a forward ref / in quotes? 
-    if isinstance(annotation, (str, typing.ForwardRef)):
-        try:
-            model_name = annotation.__forward_arg__  # type: ignore
-        except AttributeError:
-            model_name = annotation
-        if module is not None:
-            annotation = _get_model(module, model_name)
-
-    try:
-        if module and _is_model(annotation):
-            if rf:
-                rf._is_model = True
-
-            return functools.partial(_deserialize_model, annotation)  # pyright: ignore
-    except Exception:
-        pass
-
-    # is it a literal?
-    try:
-        if annotation.__origin__ is typing.Literal:  # pyright: ignore
-            return None
-    except AttributeError:
-        pass
-
-    # is it optional?
-    try:
-        if any(a for a in annotation.__args__ if a == type(None)):  # pyright: ignore
-            if len(annotation.__args__) <= 2:  # pyright: ignore
-                if_obj_deserializer = _get_deserialize_callable_from_annotation(
-                    next(a for a in annotation.__args__ if a != type(None)), module, rf  # pyright: ignore
-                )
-
-                return functools.partial(_deserialize_with_optional, if_obj_deserializer)
-            # the type is Optional[Union[...]], we need to remove the None type from the Union
-            annotation_copy = copy.copy(annotation)
-            annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)]  # pyright: ignore
-            return _get_deserialize_callable_from_annotation(annotation_copy, module, rf)
-    except AttributeError:
-        pass
-
-    # is it union?
-    if getattr(annotation, "__origin__", None) is typing.Union:
-        # initial ordering makes `string` the last deserialization option, because it is often the most generic
-        deserializers = [
-            _get_deserialize_callable_from_annotation(arg, module, rf)
-            for arg in _sorted_annotations(annotation.__args__)  # pyright: ignore
-        ]
-
-        return functools.partial(_deserialize_with_union, deserializers)
-
-    try:
-        if annotation._name == "Dict":  # pyright: ignore
-            value_deserializer = _get_deserialize_callable_from_annotation(
-                annotation.__args__[1], module, rf  # pyright: ignore
-            )
-
-            return functools.partial(
-                _deserialize_dict,
-                value_deserializer,
-                module,
-            )
-    except (AttributeError, IndexError):
-        pass
-    try:
-        if annotation._name in ["List", "Set", "Tuple", "Sequence"]:  # pyright: ignore
-            if len(annotation.__args__) > 1:  # pyright: ignore
-                entry_deserializers = [
-                    _get_deserialize_callable_from_annotation(dt, module, rf)
-                    for dt in annotation.__args__  # pyright: ignore
-                ]
-                return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module)
-            deserializer = _get_deserialize_callable_from_annotation(
-                annotation.__args__[0], module, rf  # pyright: ignore
-            )
-
-            return functools.partial(_deserialize_sequence, deserializer, module)
-    except (TypeError, IndexError, AttributeError, SyntaxError):
-        pass
-
-    def _deserialize_default(
-        deserializer,
-        obj,
-    ):
-        if obj is None:
-            return obj
-        try:
-            return _deserialize_with_callable(deserializer, obj)
-        except Exception:
-            pass
-        return obj
-
-    if get_deserializer(annotation, rf):
-        return functools.partial(_deserialize_default, get_deserializer(annotation, rf))
-
-    return functools.partial(_deserialize_default, annotation)
-
-
-def _deserialize_with_callable(
-    deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]],
-    value: typing.Any,
-):  # pylint: disable=too-many-return-statements
-    try:
-        if value is None or isinstance(value, _Null):
-            return None
-        if isinstance(value, ET.Element):
-            if deserializer is str:
-                return value.text or ""
-            if deserializer is int:
-                return int(value.text) if value.text else None
-            if deserializer is float:
-                return float(value.text) if value.text else None
-            if 
deserializer is bool: - return value.text == "true" if value.text else None - if deserializer is None: - return value - if deserializer in [int, float, bool]: - return deserializer(value) - if isinstance(deserializer, CaseInsensitiveEnumMeta): - try: - return deserializer(value) - except ValueError: - # for unknown value, return raw value - return value - if isinstance(deserializer, type) and issubclass(deserializer, Model): - return deserializer._deserialize(value, []) - return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) - except Exception as e: - raise DeserializationError() from e - - -def _deserialize( - deserializer: typing.Any, - value: typing.Any, - module: typing.Optional[str] = None, - rf: typing.Optional["_RestField"] = None, - format: typing.Optional[str] = None, -) -> typing.Any: - if isinstance(value, PipelineResponse): - value = value.http_response.json() - if rf is None and format: - rf = _RestField(format=format) - if not isinstance(deserializer, functools.partial): - deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) - return _deserialize_with_callable(deserializer, value) - - -class _RestField: - def __init__( - self, - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - is_discriminator: bool = False, - visibility: typing.Optional[typing.List[str]] = None, - default: typing.Any = _UNSET, - format: typing.Optional[str] = None, - is_multipart_file_input: bool = False, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, - ): - self._type = type - self._rest_name_input = name - self._module: typing.Optional[str] = None - self._is_discriminator = is_discriminator - self._visibility = visibility - self._is_model = False - self._default = default - self._format = format - self._is_multipart_file_input = is_multipart_file_input - self._xml = xml if xml is not None else {} - - @property - def _class_type(self) -> typing.Any: - return getattr(self._type, "args", [None])[0] - - @property - def _rest_name(self) -> str: - if self._rest_name_input is None: - raise ValueError("Rest name was never set") - return self._rest_name_input - - def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin - # by this point, type and rest_name will have a value bc we default - # them in __new__ of the Model class - item = obj.get(self._rest_name) - if item is None: - return item - if self._is_model: - return item - return _deserialize(self._type, _serialize(item, self._format), rf=self) - - def __set__(self, obj: Model, value) -> None: - if value is None: - # we want to wipe out entries if users set attr to None - try: - obj.__delitem__(self._rest_name) - except KeyError: - pass - return - if self._is_model: - if not _is_model(value): - value = _deserialize(self._type, value) - obj.__setitem__(self._rest_name, value) - return - obj.__setitem__(self._rest_name, _serialize(value, self._format)) - - def _get_deserialize_callable_from_annotation( - self, annotation: typing.Any - ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: - return _get_deserialize_callable_from_annotation(annotation, self._module, self) - - -def rest_field( - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, - default: typing.Any = _UNSET, - format: typing.Optional[str] = None, - is_multipart_file_input: bool = False, - 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, -) -> typing.Any: - return _RestField( - name=name, - type=type, - visibility=visibility, - default=default, - format=format, - is_multipart_file_input=is_multipart_file_input, - xml=xml, - ) - - -def rest_discriminator( - *, - name: typing.Optional[str] = None, - type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin - visibility: typing.Optional[typing.List[str]] = None, - xml: typing.Optional[typing.Dict[str, typing.Any]] = None, -) -> typing.Any: - return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) - - -def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: - """Serialize a model to XML. - - :param Model model: The model to serialize. - :param bool exclude_readonly: Whether to exclude readonly properties. - :returns: The XML representation of the model. - :rtype: str - """ - return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore - - -def _get_element( - o: typing.Any, - exclude_readonly: bool = False, - parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, - wrapped_element: typing.Optional[ET.Element] = None, -) -> typing.Union[ET.Element, typing.List[ET.Element]]: - if _is_model(o): - model_meta = getattr(o, "_xml", {}) - - # if prop is a model, then use the prop element directly, else generate a wrapper of model - if wrapped_element is None: - wrapped_element = _create_xml_element( - model_meta.get("name", o.__class__.__name__), - model_meta.get("prefix"), - model_meta.get("ns"), - ) - - readonly_props = [] - if exclude_readonly: - readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] - - for k, v in o.items(): - # do not serialize readonly properties - if exclude_readonly and k in readonly_props: - continue - - prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) - if prop_rest_field: - prop_meta = getattr(prop_rest_field, "_xml").copy() - # use the wire name as xml name if no specific name is set - if prop_meta.get("name") is None: - prop_meta["name"] = k - else: - # additional properties will not have rest field, use the wire name as xml name - prop_meta = {"name": k} - - # if no ns for prop, use model's - if prop_meta.get("ns") is None and model_meta.get("ns"): - prop_meta["ns"] = model_meta.get("ns") - prop_meta["prefix"] = model_meta.get("prefix") - - if prop_meta.get("unwrapped", False): - # unwrapped could only set on array - wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) - elif prop_meta.get("text", False): - # text could only set on primitive type - wrapped_element.text = _get_primitive_type_value(v) - elif prop_meta.get("attribute", False): - xml_name = prop_meta.get("name", k) - if prop_meta.get("ns"): - ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore - xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore - # attribute should be primitive type - wrapped_element.set(xml_name, _get_primitive_type_value(v)) - else: - # other wrapped prop element - wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) - return wrapped_element - if isinstance(o, list): - return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore - if isinstance(o, dict): - result = [] - for k, v in o.items(): - result.append( - _get_wrapped_element( - v, - exclude_readonly, - { - "name": k, - "ns": parent_meta.get("ns") if parent_meta else None, - 
"prefix": parent_meta.get("prefix") if parent_meta else None, - }, - ) - ) - return result - - # primitive case need to create element based on parent_meta - if parent_meta: - return _get_wrapped_element( - o, - exclude_readonly, - { - "name": parent_meta.get("itemsName", parent_meta.get("name")), - "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), - "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), - }, - ) - - raise ValueError("Could not serialize value into xml: " + o) - - -def _get_wrapped_element( - v: typing.Any, - exclude_readonly: bool, - meta: typing.Optional[typing.Dict[str, typing.Any]], -) -> ET.Element: - wrapped_element = _create_xml_element( - meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None - ) - if isinstance(v, (dict, list)): - wrapped_element.extend(_get_element(v, exclude_readonly, meta)) - elif _is_model(v): - _get_element(v, exclude_readonly, meta, wrapped_element) - else: - wrapped_element.text = _get_primitive_type_value(v) - return wrapped_element - - -def _get_primitive_type_value(v) -> str: - if v is True: - return "true" - if v is False: - return "false" - if isinstance(v, _Null): - return "" - return str(v) - - -def _create_xml_element(tag, prefix=None, ns=None): - if prefix and ns: - ET.register_namespace(prefix, ns) - if ns: - return ET.Element("{" + ns + "}" + tag) - return ET.Element(tag) - - -def _deserialize_xml( - deserializer: typing.Any, - value: str, -) -> typing.Any: - element = ET.fromstring(value) # nosec - return _deserialize(deserializer, element) - - -def _convert_element(e: ET.Element): - # dict case - if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: - dict_result: typing.Dict[str, typing.Any] = {} - for child in e: - if dict_result.get(child.tag) is not None: - if isinstance(dict_result[child.tag], list): - dict_result[child.tag].append(_convert_element(child)) - else: - dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] - else: - dict_result[child.tag] = _convert_element(child) - dict_result.update(e.attrib) - return dict_result - # array case - if len(e) > 0: - array_result: typing.List[typing.Any] = [] - for child in e: - array_result.append(_convert_element(child)) - return array_result - # primitive case - return e.text diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/_patch.py deleted file mode 100644 index 53c3c5b6697b..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_patch.py +++ /dev/null @@ -1,246 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
-
-Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
-"""
-import uuid
-from os import PathLike
-from pathlib import Path
-from typing import List, Any, Union, Dict
-from typing_extensions import Self
-from azure.core.credentials import TokenCredential
-from azure.core import PipelineClient
-from azure.core.pipeline import policies
-from ._configuration import AIProjectClientConfiguration
-from ._serialization import Deserializer, Serializer
-from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
-from ._client import AIProjectClient as ClientGenerated
-from .operations._patch import InferenceOperations
-
-
-class AIProjectClient(ClientGenerated):
-
-    def __init__(
-        self,
-        endpoint: str,
-        subscription_id: str,
-        resource_group_name: str,
-        project_name: str,
-        credential: "TokenCredential",
-        **kwargs: Any,
-    ) -> None:
-        # TODO: Validate input formats with regex match (e.g. subscription ID)
-        if not endpoint:
-            raise ValueError("endpoint is required")
-        if not subscription_id:
-            raise ValueError("subscription_id is required")
-        if not resource_group_name:
-            raise ValueError("resource_group_name is required")
-        if not project_name:
-            raise ValueError("project_name is required")
-        if not credential:
-            raise ValueError("credential is required")
-        if "api_version" in kwargs:
-            raise ValueError("No support for overriding the API version")
-        if "credential_scopes" in kwargs:
-            raise ValueError("No support for overriding the credential scopes")
-
-        kwargs1 = kwargs.copy()
-        kwargs2 = kwargs.copy()
-        kwargs3 = kwargs.copy()
-
-        # For Endpoints operations (enumerating connections, getting SAS tokens)
-        _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
-        self._config1 = AIProjectClientConfiguration(
-            endpoint=endpoint,
-            subscription_id=subscription_id,
-            resource_group_name=resource_group_name,
-            project_name=project_name,
-            credential=credential,
-            api_version="2024-07-01-preview",
-            credential_scopes=["https://management.azure.com"],
-            **kwargs1,
-        )
-        _policies1 = kwargs1.pop("policies", None)
-        if _policies1 is None:
-            _policies1 = [
-                policies.RequestIdPolicy(**kwargs1),
-                self._config1.headers_policy,
-                self._config1.user_agent_policy,
-                self._config1.proxy_policy,
-                policies.ContentDecodePolicy(**kwargs1),
-                self._config1.redirect_policy,
-                self._config1.retry_policy,
-                self._config1.authentication_policy,
-                self._config1.custom_hook_policy,
-                self._config1.logging_policy,
-                policies.DistributedTracingPolicy(**kwargs1),
-                policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None,
-                self._config1.http_logging_policy,
-            ]
-        self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1)
-
-        # For Agents operations
-        _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
-        self._config2 = AIProjectClientConfiguration(
-            endpoint=endpoint,
-            subscription_id=subscription_id,
-            resource_group_name=resource_group_name,
-            project_name=project_name,
-            credential=credential,
-            api_version="2024-07-01-preview",  # TODO: Update me
-            credential_scopes=["https://ml.azure.com"],
-            **kwargs2,
-        )
-        _policies2 = kwargs2.pop("policies", None)
-        if _policies2 is
None: - _policies2 = [ - policies.RequestIdPolicy(**kwargs2), - self._config2.headers_policy, - self._config2.user_agent_policy, - self._config2.proxy_policy, - policies.ContentDecodePolicy(**kwargs2), - self._config2.redirect_policy, - self._config2.retry_policy, - self._config2.authentication_policy, - self._config2.custom_hook_policy, - self._config2.logging_policy, - policies.DistributedTracingPolicy(**kwargs2), - policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, - self._config2.http_logging_policy, - ] - self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) - - # For Cloud Evaluations operations - _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready - **kwargs3, - ) - _policies3 = kwargs3.pop("policies", None) - if _policies3 is None: - _policies3 = [ - policies.RequestIdPolicy(**kwargs3), - self._config3.headers_policy, - self._config3.user_agent_policy, - self._config3.proxy_policy, - policies.ContentDecodePolicy(**kwargs3), - self._config3.redirect_policy, - self._config3.retry_policy, - self._config3.authentication_policy, - self._config3.custom_hook_policy, - self._config3.logging_policy, - policies.DistributedTracingPolicy(**kwargs3), - policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, - self._config3.http_logging_policy, - ] - self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - - self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) - self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) - self.inference = InferenceOperations(self) - - def close(self) -> None: - self._client1.close() - self._client2.close() - self._client3.close() - - def __enter__(self) -> Self: - self._client1.__enter__() - self._client2.__enter__() - self._client3.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client1.__exit__(*exc_details) - self._client2.__exit__(*exc_details) - self._client3.__exit__(*exc_details) - - @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AIProjectClient": - """ - Create an AIProjectClient from a connection string. - - :param conn_str: The connection string, copied from your AI Studio project. 
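-        :type conn_str: str
-        :param credential: The credential to use when calling the service.
-        :type credential: ~azure.core.credentials.TokenCredential
-        :return: An AIProjectClient instance.
-        :rtype: AIProjectClient
-
-        .. code:: python
-
-            # Illustrative sketch only; the connection string below shows the four
-            # ';'-separated parts this method parses, with placeholder values.
-            from azure.identity import DefaultAzureCredential
-
-            client = AIProjectClient.from_connection_string(
-                "eastus.api.azureml.ms;00000000-0000-0000-0000-000000000000;my-resource-group;my-project",
-                credential=DefaultAzureCredential(),
-            )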
- """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: - """Upload a file to the Azure AI Studio project. - This method required *azure-ai-ml* to be installed. - - :param file_path: The path to the file to upload. - :type file_path: Union[str, Path, PathLike] - :return: The asset id of uploaded file. - :rtype: str - """ - try: - from azure.ai.ml import MLClient - from azure.ai.ml.entities import Data - from azure.ai.ml.constants import AssetTypes - except ImportError: - raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") - - data = Data( - path=file_path, - type=AssetTypes.URI_FILE, - name=str(uuid.uuid4()), # generating random name - is_anonymous=True, - version="1", - ) - - ml_client = MLClient( - self._config3.credential, - self._config3.subscription_id, - self._config3.resource_group_name, - self._config3.project_name, - ) - - data_asset = ml_client.data.create_or_update(data) - - return data_asset.id - - @property - def scope(self) -> Dict[str, str]: - return { - "subscription_id": self._config3.subscription_id, - "resource_group_name": self._config3.resource_group_name, - "project_name": self._config3.project_name, - } - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py b/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py deleted file mode 100644 index ce17d1798ce7..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_serialization.py +++ /dev/null @@ -1,2114 +0,0 @@ -# pylint: disable=too-many-lines -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# pyright: reportUnnecessaryTypeIgnoreComment=false - -from base64 import b64decode, b64encode -import calendar -import datetime -import decimal -import email -from enum import Enum -import json -import logging -import re -import sys -import codecs -from typing import ( - Dict, - Any, - cast, - Optional, - Union, - AnyStr, - IO, - Mapping, - Callable, - TypeVar, - MutableMapping, - Type, - List, -) - -try: - from urllib import quote # type: ignore -except ImportError: - from urllib.parse import quote -import xml.etree.ElementTree as ET - -import isodate # type: ignore - -from azure.core.exceptions import DeserializationError, SerializationError -from azure.core.serialization import NULL as CoreNull - -_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") - -ModelType = TypeVar("ModelType", bound="Model") -JSON = MutableMapping[str, Any] - - -class RawDeserializer: - - # Accept "text" because we're open minded people... - JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") - - # Name used in context - CONTEXT_NAME = "deserialized_data" - - @classmethod - def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: - """Decode data according to content-type. - - Accept a stream of data as well, but will be load at once in memory for now. - - If no content-type, will return the string version (not bytes, not stream) - - :param data: Input, could be bytes or stream (will be decoded with UTF8) or text - :type data: str or bytes or IO - :param str content_type: The content type. - :return: The deserialized data. - :rtype: object - """ - if hasattr(data, "read"): - # Assume a stream - data = cast(IO, data).read() - - if isinstance(data, bytes): - data_as_str = data.decode(encoding="utf-8-sig") - else: - # Explain to mypy the correct type. - data_as_str = cast(str, data) - - # Remove Byte Order Mark if present in string - data_as_str = data_as_str.lstrip(_BOM) - - if content_type is None: - return data - - if cls.JSON_REGEXP.match(content_type): - try: - return json.loads(data_as_str) - except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) from err - elif "xml" in (content_type or []): - try: - - try: - if isinstance(data, unicode): # type: ignore - # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string - data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore - except NameError: - pass - - return ET.fromstring(data_as_str) # nosec - except ET.ParseError as err: - # It might be because the server has an issue, and returned JSON with - # content-type XML.... - # So let's try a JSON load, and if it's still broken - # let's flow the initial exception - def _json_attemp(data): - try: - return True, json.loads(data) - except ValueError: - return False, None # Don't care about this one - - success, json_result = _json_attemp(data) - if success: - return json_result - # If i'm here, it's not JSON, it's not XML, let's scream - # and raise the last context in this block (the XML exception) - # The function hack is because Py2.7 messes up with exception - # context otherwise. 
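-                # Illustrative scenario for the fallback above: a body declared as
-                # "application/xml" that actually contains b'{"error": {"code": "NotFound"}}'
-                # fails ET.fromstring, then parses in _json_attemp and is returned as JSON;
-                # only when both parses fail do we raise from the original XML ParseError.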
- _LOGGER.critical("Wasn't XML not JSON, failing") - raise DeserializationError("XML is invalid") from err - elif content_type.startswith("text/"): - return data_as_str - raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) - - @classmethod - def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: - """Deserialize from HTTP response. - - Use bytes and headers to NOT use any requests/aiohttp or whatever - specific implementation. - Headers will tested for "content-type" - - :param bytes body_bytes: The body of the response. - :param dict headers: The headers of the response. - :returns: The deserialized data. - :rtype: object - """ - # Try to use content-type from headers if available - content_type = None - if "content-type" in headers: - content_type = headers["content-type"].split(";")[0].strip().lower() - # Ouch, this server did not declare what it sent... - # Let's guess it's JSON... - # Also, since Autorest was considering that an empty body was a valid JSON, - # need that test as well.... - else: - content_type = "application/json" - - if body_bytes: - return cls.deserialize_from_text(body_bytes, content_type) - return None - - -_LOGGER = logging.getLogger(__name__) - -try: - _long_type = long # type: ignore -except NameError: - _long_type = int - - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0. - - :param datetime.datetime dt: The datetime - :returns: The offset - :rtype: datetime.timedelta - """ - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation. - - :param datetime.datetime dt: The datetime - :returns: The timestamp representation - :rtype: str - """ - return "Z" - - def dst(self, dt): - """No daylight saving for UTC. - - :param datetime.datetime dt: The datetime - :returns: The daylight saving time - :rtype: datetime.timedelta - """ - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset) -> None: - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore - -_FLATTEN = re.compile(r"(? None: - self.additional_properties: Optional[Dict[str, Any]] = {} - for k in kwargs: # pylint: disable=consider-using-dict-items - if k not in self._attribute_map: - _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) - elif k in self._validation and self._validation[k].get("readonly", False): - _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) - else: - setattr(self, k, kwargs[k]) - - def __eq__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. 
- - :param object other: The object to compare - :returns: True if objects are equal - :rtype: bool - """ - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. - - :param object other: The object to compare - :returns: True if objects are not equal - :rtype: bool - """ - return not self.__eq__(other) - - def __str__(self) -> str: - return str(self.__dict__) - - @classmethod - def enable_additional_properties_sending(cls) -> None: - cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} - - @classmethod - def is_xml_model(cls) -> bool: - try: - cls._xml_map # type: ignore - except AttributeError: - return False - return True - - @classmethod - def _create_xml_node(cls): - """Create XML node. - - :returns: The XML node - :rtype: xml.etree.ElementTree.Element - """ - try: - xml_map = cls._xml_map # type: ignore - except AttributeError: - xml_map = {} - - return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) - - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: - """Return the JSON that would be sent to server from this model. - - This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, keep_readonly=keep_readonly, **kwargs - ) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, - **kwargs: Any - ) -> JSON: - """Return a dict that can be serialized using json.dump. - - Advanced usage might optionally use a callback as parameter: - - .. code::python - - def my_key_transformer(key, attr_desc, value): - return key - - Key is the attribute name used in Python. Attr_desc - is a dict of metadata. Currently contains 'type' with the - msrest type and 'key' with the RestAPI encoded key. - Value is the current value in this object. - - The string returned will be used to serialize the key. - If the return type is a list, this is considered hierarchical - result dict. - - See the three examples in this file: - - - attribute_transformer - - full_restapi_key_transformer - - last_restapi_key_transformer - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :param function key_transformer: A key transformer function. - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs - ) - - @classmethod - def _infer_class_models(cls): - try: - str_models = cls.__module__.rsplit(".", 1)[0] - models = sys.modules[str_models] - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - if cls.__name__ not in client_models: - raise ValueError("Not Autorest generated code") - except Exception: # pylint: disable=broad-exception-caught - # Assume it's not Autorest generated (tests?). 
Add ourselves as dependencies. - client_models = {cls.__name__: cls} - return client_models - - @classmethod - def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: - """Parse a str using the RestAPI syntax and return a model. - - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def from_dict( - cls: Type[ModelType], - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> ModelType: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) - - :param dict data: A dict using RestAPI structure - :param function key_extractors: A key extractor function. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - deserializer.key_extractors = ( # type: ignore - [ # type: ignore - attribute_key_case_insensitive_extractor, - rest_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - if key_extractors is None - else key_extractors - ) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def _flatten_subtype(cls, key, objects): - if "_subtype_map" not in cls.__dict__: - return {} - result = dict(cls._subtype_map[key]) - for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access - return result - - @classmethod - def _classify(cls, response, objects): - """Check the class _subtype_map for any child classes. - We want to ignore any inherited _subtype_maps. - - :param dict response: The initial data - :param dict objects: The class objects - :returns: The class to be used - :rtype: class - """ - for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): - subtype_value = None - - if not isinstance(response, ET.Element): - rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] - subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) - else: - subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) - if subtype_value: - # Try to match base class. 
Can be class name only - # (bug to fix in Autorest to support x-ms-discriminator-name) - if cls.__name__ == subtype_value: - return cls - flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) - try: - return objects[flatten_mapping_type[subtype_value]] # type: ignore - except KeyError: - _LOGGER.warning( - "Subtype value %s has no mapping, use base class %s.", - subtype_value, - cls.__name__, - ) - break - else: - _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) - break - return cls - - @classmethod - def _get_rest_key_parts(cls, attr_key): - """Get the RestAPI key of this attr, split it and decode part - :param str attr_key: Attribute key must be in attribute_map. - :returns: A list of RestAPI part - :rtype: list - """ - rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) - return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] - - -def _decode_attribute_map_key(key): - """This decode a key in an _attribute_map to the actual key we want to look at - inside the received data. - - :param str key: A key string from the generated code - :returns: The decoded key - :rtype: str - """ - return key.replace("\\.", ".") - - -class Serializer(object): # pylint: disable=too-many-public-methods - """Request object model serializer.""" - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} - days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} - months = { - 1: "Jan", - 2: "Feb", - 3: "Mar", - 4: "Apr", - 5: "May", - 6: "Jun", - 7: "Jul", - 8: "Aug", - 9: "Sep", - 10: "Oct", - 11: "Nov", - 12: "Dec", - } - validation = { - "min_length": lambda x, y: len(x) < y, - "max_length": lambda x, y: len(x) > y, - "minimum": lambda x, y: x < y, - "maximum": lambda x, y: x > y, - "minimum_ex": lambda x, y: x <= y, - "maximum_ex": lambda x, y: x >= y, - "min_items": lambda x, y: len(x) < y, - "max_items": lambda x, y: len(x) > y, - "pattern": lambda x, y: not re.match(y, x, re.UNICODE), - "unique": lambda x, y: len(x) != len(set(x)), - "multiple": lambda x, y: x % y != 0, - } - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.serialize_type = { - "iso-8601": Serializer.serialize_iso, - "rfc-1123": Serializer.serialize_rfc, - "unix-time": Serializer.serialize_unix, - "duration": Serializer.serialize_duration, - "date": Serializer.serialize_date, - "time": Serializer.serialize_time, - "decimal": Serializer.serialize_decimal, - "long": Serializer.serialize_long, - "bytearray": Serializer.serialize_bytearray, - "base64": Serializer.serialize_base64, - "object": self.serialize_object, - "[]": self.serialize_iter, - "{}": self.serialize_dict, - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_transformer = full_restapi_key_transformer - self.client_side_validation = True - - def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals - self, target_obj, data_type=None, **kwargs - ): - """Serialize data into a string according to type. - - :param object target_obj: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, dict - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
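-
-        .. code:: python
-
-            # Sketch of the two dispatch paths (my_model is a hypothetical Model instance):
-            serializer = Serializer()
-            serializer._serialize(["a", "b"], data_type="[str]")  # typed path, via serialize_data
-            serializer._serialize(my_model)  # model path, driven by the model's _attribute_map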
- """ - key_transformer = kwargs.get("key_transformer", self.key_transformer) - keep_readonly = kwargs.get("keep_readonly", False) - if target_obj is None: - return None - - attr_name = None - class_name = target_obj.__class__.__name__ - - if data_type: - return self.serialize_data(target_obj, data_type, **kwargs) - - if not hasattr(target_obj, "_attribute_map"): - data_type = type(target_obj).__name__ - if data_type in self.basic_types.values(): - return self.serialize_data(target_obj, data_type, **kwargs) - - # Force "is_xml" kwargs if we detect a XML model - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) - - serialized = {} - if is_xml_model_serialization: - serialized = target_obj._create_xml_node() # pylint: disable=protected-access - try: - attributes = target_obj._attribute_map # pylint: disable=protected-access - for attr, attr_desc in attributes.items(): - attr_name = attr - if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access - attr_name, {} - ).get("readonly", False): - continue - - if attr_name == "additional_properties" and attr_desc["key"] == "": - if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) - continue - try: - - orig_attr = getattr(target_obj, attr) - if is_xml_model_serialization: - pass # Don't provide "transformer" for XML for now. Keep "orig_attr" - else: # JSON - keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) - keys = keys if isinstance(keys, list) else [keys] - - kwargs["serialization_ctxt"] = attr_desc - new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) - - if is_xml_model_serialization: - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - xml_prefix = xml_desc.get("prefix", None) - xml_ns = xml_desc.get("ns", None) - if xml_desc.get("attr", False): - if xml_ns: - ET.register_namespace(xml_prefix, xml_ns) - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - serialized.set(xml_name, new_attr) # type: ignore - continue - if xml_desc.get("text", False): - serialized.text = new_attr # type: ignore - continue - if isinstance(new_attr, list): - serialized.extend(new_attr) # type: ignore - elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, - # we MUST replace the tag with the local tag. But keeping the namespaces. 
- if "name" not in getattr(orig_attr, "_xml_map", {}): - splitted_tag = new_attr.tag.split("}") - if len(splitted_tag) == 2: # Namespace - new_attr.tag = "}".join([splitted_tag[0], xml_name]) - else: - new_attr.tag = xml_name - serialized.append(new_attr) # type: ignore - else: # That's a basic type - # Integrate namespace if necessary - local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) - local_node.text = str(new_attr) - serialized.append(local_node) # type: ignore - else: # JSON - for k in reversed(keys): # type: ignore - new_attr = {k: new_attr} - - _new_attr = new_attr - _serialized = serialized - for k in keys: # type: ignore - if k not in _serialized: - _serialized.update(_new_attr) # type: ignore - _new_attr = _new_attr[k] # type: ignore - _serialized = _serialized[k] - except ValueError as err: - if isinstance(err, SerializationError): - raise - - except (AttributeError, KeyError, TypeError) as err: - msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) - raise SerializationError(msg) from err - return serialized - - def body(self, data, data_type, **kwargs): - """Serialize data intended for a request body. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized request body - """ - - # Just in case this is a dict - internal_data_type_str = data_type.strip("[]{}") - internal_data_type = self.dependencies.get(internal_data_type_str, None) - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - if internal_data_type and issubclass(internal_data_type, Model): - is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) - else: - is_xml_model_serialization = False - if internal_data_type and not isinstance(internal_data_type, Enum): - try: - deserializer = Deserializer(self.dependencies) - # Since it's on serialization, it's almost sure that format is not JSON REST - # We're not able to deal with additional properties for now. - deserializer.additional_properties_detection = False - if is_xml_model_serialization: - deserializer.key_extractors = [ # type: ignore - attribute_key_case_insensitive_extractor, - ] - else: - deserializer.key_extractors = [ - rest_key_case_insensitive_extractor, - attribute_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access - except DeserializationError as err: - raise SerializationError("Unable to build a model: " + str(err)) from err - - return self._serialize(data, data_type, **kwargs) - - def url(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL path. - - :param str name: The name of the URL path parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :returns: The serialized URL path - :raises: TypeError if serialization fails. 
- :raises: ValueError if data is None - """ - try: - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - - if kwargs.get("skip_quote") is True: - output = str(output) - output = output.replace("{", quote("{")).replace("}", quote("}")) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return output - - def query(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL query. - - :param str name: The name of the query parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized query parameter - """ - try: - # Treat the list aside, since we don't want to encode the div separator - if data_type.startswith("["): - internal_data_type = data_type[1:-1] - do_quote = not kwargs.get("skip_quote", False) - return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) - - # Not a list, regular serialization - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - if kwargs.get("skip_quote") is True: - output = str(output) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def header(self, name, data, data_type, **kwargs): - """Serialize data intended for a request header. - - :param str name: The name of the header. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized header - """ - try: - if data_type in ["[str]"]: - data = ["" if d is None else d for d in data] - - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def serialize_data(self, data, data_type, **kwargs): - """Serialize generic data according to supplied data type. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
- :rtype: str, int, float, bool, dict, list - """ - if data is None: - raise ValueError("No value for given attribute") - - try: - if data is CoreNull: - return None - if data_type in self.basic_types.values(): - return self.serialize_basic(data, data_type, **kwargs) - - if data_type in self.serialize_type: - return self.serialize_type[data_type](data, **kwargs) - - # If dependencies is empty, try with current data class - # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) - if issubclass(enum_type, Enum): - return Serializer.serialize_enum(data, enum_obj=enum_type) - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.serialize_type: - return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) - - except (ValueError, TypeError) as err: - msg = "Unable to serialize value: {!r} as type: {!r}." - raise SerializationError(msg.format(data, data_type)) from err - return self._serialize(data, **kwargs) - - @classmethod - def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements - custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) - if custom_serializer: - return custom_serializer - if kwargs.get("is_xml", False): - return cls._xml_basic_types_serializers.get(data_type) - - @classmethod - def serialize_basic(cls, data, data_type, **kwargs): - """Serialize basic builting data type. - Serializes objects to str, int, float or bool. - - Possible kwargs: - - basic_types_serializers dict[str, callable] : If set, use the callable as serializer - - is_xml bool : If set, use xml_basic_types_serializers - - :param obj data: Object to be serialized. - :param str data_type: Type of object in the iterable. - :rtype: str, int, float, bool - :return: serialized object - """ - custom_serializer = cls._get_custom_serializers(data_type, **kwargs) - if custom_serializer: - return custom_serializer(data) - if data_type == "str": - return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec # pylint: disable=eval-used - - @classmethod - def serialize_unicode(cls, data): - """Special handling for serializing unicode strings in Py2. - Encode to UTF-8 if unicode, otherwise handle as a str. - - :param str data: Object to be serialized. - :rtype: str - :return: serialized object - """ - try: # If I received an enum, return its value - return data.value - except AttributeError: - pass - - try: - if isinstance(data, unicode): # type: ignore - # Don't change it, JSON and XML ElementTree are totally able - # to serialize correctly u'' strings - return data - except NameError: - return str(data) - return str(data) - - def serialize_iter(self, data, iter_type, div=None, **kwargs): - """Serialize iterable. - - Supported kwargs: - - serialization_ctxt dict : The current entry of _attribute_map, or same format. - serialization_ctxt['type'] should be same as data_type. - - is_xml bool : If set, serialize as XML - - :param list data: Object to be serialized. - :param str iter_type: Type of object in the iterable. - :param str div: If set, this str will be used to combine the elements - in the iterable into a combined string. Default is 'None'. - Defaults to False. 
- :rtype: list, str - :return: serialized iterable - """ - if isinstance(data, str): - raise SerializationError("Refuse str type as a valid iter type.") - - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - is_xml = kwargs.get("is_xml", False) - - serialized = [] - for d in data: - try: - serialized.append(self.serialize_data(d, iter_type, **kwargs)) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized.append(None) - - if kwargs.get("do_quote", False): - serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] - - if div: - serialized = ["" if s is None else str(s) for s in serialized] - serialized = div.join(serialized) - - if "xml" in serialization_ctxt or is_xml: - # XML serialization is more complicated - xml_desc = serialization_ctxt.get("xml", {}) - xml_name = xml_desc.get("name") - if not xml_name: - xml_name = serialization_ctxt["key"] - - # Create a wrap node if necessary (use the fact that Element and list have "append") - is_wrapped = xml_desc.get("wrapped", False) - node_name = xml_desc.get("itemsName", xml_name) - if is_wrapped: - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - else: - final_result = [] - # All list elements to "local_node" - for el in serialized: - if isinstance(el, ET.Element): - el_node = el - else: - el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - if el is not None: # Otherwise it writes "None" :-p - el_node.text = str(el) - final_result.append(el_node) - return final_result - return serialized - - def serialize_dict(self, attr, dict_type, **kwargs): - """Serialize a dictionary of objects. - - :param dict attr: Object to be serialized. - :param str dict_type: Type of object in the dictionary. - :rtype: dict - :return: serialized dictionary - """ - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized[self.serialize_unicode(key)] = None - - if "xml" in serialization_ctxt: - # XML serialization is more complicated - xml_desc = serialization_ctxt["xml"] - xml_name = xml_desc["name"] - - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - for key, value in serialized.items(): - ET.SubElement(final_result, key).text = value - return final_result - - return serialized - - def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements - """Serialize a generic object. - This will be handled as a dictionary. If object passed in is not - a basic type (str, int, float, dict, list) it will simply be - cast to str. - - :param dict attr: Object to be serialized. 
- :rtype: dict or str - :return: serialized object - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - return attr - obj_type = type(attr) - if obj_type in self.basic_types: - return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) - if obj_type is _long_type: - return self.serialize_long(attr) - if obj_type is str: - return self.serialize_unicode(attr) - if obj_type is datetime.datetime: - return self.serialize_iso(attr) - if obj_type is datetime.date: - return self.serialize_date(attr) - if obj_type is datetime.time: - return self.serialize_time(attr) - if obj_type is datetime.timedelta: - return self.serialize_duration(attr) - if obj_type is decimal.Decimal: - return self.serialize_decimal(attr) - - # If it's a model or I know this dependency, serialize as a Model - if obj_type in self.dependencies.values() or isinstance(attr, Model): - return self._serialize(attr) - - if obj_type == dict: - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) - except ValueError: - serialized[self.serialize_unicode(key)] = None - return serialized - - if obj_type == list: - serialized = [] - for obj in attr: - try: - serialized.append(self.serialize_object(obj, **kwargs)) - except ValueError: - pass - return serialized - return str(attr) - - @staticmethod - def serialize_enum(attr, enum_obj=None): - try: - result = attr.value - except AttributeError: - result = attr - try: - enum_obj(result) # type: ignore - return result - except ValueError as exc: - for enum_value in enum_obj: # type: ignore - if enum_value.value.lower() == str(attr).lower(): - return enum_value.value - error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) from exc - - @staticmethod - def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument - """Serialize bytearray into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - return b64encode(attr).decode() - - @staticmethod - def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument - """Serialize str into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - encoded = b64encode(attr).decode("ascii") - return encoded.strip("=").replace("+", "-").replace("/", "_") - - @staticmethod - def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Decimal object to float. - - :param decimal attr: Object to be serialized. - :rtype: float - :return: serialized decimal - """ - return float(attr) - - @staticmethod - def serialize_long(attr, **kwargs): # pylint: disable=unused-argument - """Serialize long (Py2) or int (Py3). - - :param int attr: Object to be serialized. - :rtype: int/long - :return: serialized long - """ - return _long_type(attr) - - @staticmethod - def serialize_date(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Date object into ISO-8601 formatted string. - - :param Date attr: Object to be serialized. - :rtype: str - :return: serialized date - """ - if isinstance(attr, str): - attr = isodate.parse_date(attr) - t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) - return t - - @staticmethod - def serialize_time(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Time object into ISO-8601 formatted string. - - :param datetime.time attr: Object to be serialized. 
- :rtype: str - :return: serialized time - """ - if isinstance(attr, str): - attr = isodate.parse_time(attr) - t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) - if attr.microsecond: - t += ".{:02}".format(attr.microsecond) - return t - - @staticmethod - def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument - """Serialize TimeDelta object into ISO-8601 formatted string. - - :param TimeDelta attr: Object to be serialized. - :rtype: str - :return: serialized duration - """ - if isinstance(attr, str): - attr = isodate.parse_duration(attr) - return isodate.duration_isoformat(attr) - - @staticmethod - def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into RFC-1123 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: TypeError if format invalid. - :return: serialized rfc - """ - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - except AttributeError as exc: - raise TypeError("RFC1123 object must be valid Datetime object.") from exc - - return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( - Serializer.days[utc.tm_wday], - utc.tm_mday, - Serializer.months[utc.tm_mon], - utc.tm_year, - utc.tm_hour, - utc.tm_min, - utc.tm_sec, - ) - - @staticmethod - def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: SerializationError if format invalid. - :return: serialized iso - """ - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") - if microseconds: - microseconds = "." + microseconds - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec - ) - return date + microseconds + "Z" - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise SerializationError(msg) from err - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise TypeError(msg) from err - - @staticmethod - def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into IntTime format. - This is represented as seconds. - - :param Datetime attr: Object to be serialized. - :rtype: int - :raises: SerializationError if format invalid - :return: serialied unix - """ - if isinstance(attr, int): - return attr - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - return int(calendar.timegm(attr.utctimetuple())) - except AttributeError as exc: - raise TypeError("Unix time object must be valid Datetime object.") from exc - - -def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - key = attr_desc["key"] - working_data = data - - while "." 
in key: - # Need the cast, as for some reasons "split" is typed as list[str | Any] - dict_keys = cast(List[str], _FLATTEN.split(key)) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = working_data.get(working_key, data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - return working_data.get(key) - - -def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements - attr, attr_desc, data -): - key = attr_desc["key"] - working_data = data - - while "." in key: - dict_keys = _FLATTEN.split(key) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - if working_data: - return attribute_key_case_insensitive_extractor(key, None, working_data) - - -def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_extractor(dict_keys[-1], None, data) - - -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - This is the case insensitive version of "last_rest_key_extractor" - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) - - -def attribute_key_extractor(attr, _, data): - return data.get(attr) - - -def attribute_key_case_insensitive_extractor(attr, _, data): - found_key = None - lower_attr = attr.lower() - for key in data: - if lower_attr == key.lower(): - found_key = key - break - - return data.get(found_key) - - -def _extract_name_from_internal_type(internal_type): - """Given an internal type XML description, extract correct XML name with namespace. 
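-
-    .. code:: python
-
-        # Illustrative: for a model whose _xml_map is {"name": "Blob", "ns": "http://x"},
-        # this returns "{http://x}Blob"; with no ns it returns just "Blob".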
- - :param dict internal_type: An model type - :rtype: tuple - :returns: A tuple XML name + namespace dict - """ - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - xml_name = internal_type_xml_map.get("name", internal_type.__name__) - xml_ns = internal_type_xml_map.get("ns", None) - if xml_ns: - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - return xml_name - - -def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements - if isinstance(data, dict): - return None - - # Test if this model is XML ready first - if not isinstance(data, ET.Element): - return None - - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - - # Look for a children - is_iter_type = attr_desc["type"].startswith("[") - is_wrapped = xml_desc.get("wrapped", False) - internal_type = attr_desc.get("internalType", None) - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - - # Integrate namespace if necessary - xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) - if xml_ns: - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - - # If it's an attribute, that's simple - if xml_desc.get("attr", False): - return data.get(xml_name) - - # If it's x-ms-text, that's simple too - if xml_desc.get("text", False): - return data.text - - # Scenario where I take the local name: - # - Wrapped node - # - Internal type is an enum (considered basic types) - # - Internal type has no XML/Name node - if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): - children = data.findall(xml_name) - # If internal type has a local name and it's not a list, I use that name - elif not is_iter_type and internal_type and "name" in internal_type_xml_map: - xml_name = _extract_name_from_internal_type(internal_type) - children = data.findall(xml_name) - # That's an array - else: - if internal_type: # Complex type, ignore itemsName and use the complex type name - items_name = _extract_name_from_internal_type(internal_type) - else: - items_name = xml_desc.get("itemsName", xml_name) - children = data.findall(items_name) - - if len(children) == 0: - if is_iter_type: - if is_wrapped: - return None # is_wrapped no node, we want None - return [] # not wrapped, assume empty list - return None # Assume it's not there, maybe an optional node. - - # If is_iter_type and not wrapped, return all found children - if is_iter_type: - if not is_wrapped: - return children - # Iter and wrapped, should have found one node only (the wrap one) - if len(children) != 1: - raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long - xml_name - ) - ) - return list(children[0]) # Might be empty list and that's ok. - - # Here it's not a itertype, we should have found one element only or empty - if len(children) > 1: - raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) - return children[0] - - -class Deserializer(object): - """Response object model deserializer. - - :param dict classes: Class type dictionary for deserializing complex types. - :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
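-
-    .. code:: python
-
-        # Sketch with a hypothetical generated model class registered by name;
-        # http_response is any supported response/body object.
-        deserializer = Deserializer({"MyModel": MyModel})
-        model = deserializer("MyModel", http_response, content_type="application/json")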
- """ - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.deserialize_type = { - "iso-8601": Deserializer.deserialize_iso, - "rfc-1123": Deserializer.deserialize_rfc, - "unix-time": Deserializer.deserialize_unix, - "duration": Deserializer.deserialize_duration, - "date": Deserializer.deserialize_date, - "time": Deserializer.deserialize_time, - "decimal": Deserializer.deserialize_decimal, - "long": Deserializer.deserialize_long, - "bytearray": Deserializer.deserialize_bytearray, - "base64": Deserializer.deserialize_base64, - "object": self.deserialize_object, - "[]": self.deserialize_iter, - "{}": self.deserialize_dict, - } - self.deserialize_expected_types = { - "duration": (isodate.Duration, datetime.timedelta), - "iso-8601": (datetime.datetime), - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_extractors = [rest_key_extractor, xml_key_extractor] - # Additional properties only works if the "rest_key_extractor" is used to - # extract the keys. Making it to work whatever the key extractor is too much - # complicated, with no real scenario for now. - # So adding a flag to disable additional properties detection. This flag should be - # used if your expect the deserialization to NOT come from a JSON REST syntax. - # Otherwise, result are unexpected - self.additional_properties_detection = True - - def __call__(self, target_obj, response_data, content_type=None): - """Call the deserializer to process a REST response. - - :param str target_obj: Target data type to deserialize to. - :param requests.Response response_data: REST response object. - :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - data = self._unpack_content(response_data, content_type) - return self._deserialize(target_obj, data) - - def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements - """Call the deserializer on a model. - - Data needs to be already deserialized as JSON or XML ElementTree - - :param str target_obj: Target data type to deserialize to. - :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. 
- :rtype: object - """ - # This is already a model, go recursive just in case - if hasattr(data, "_attribute_map"): - constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] - try: - for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access - if attr in constants: - continue - value = getattr(data, attr) - if value is None: - continue - local_type = mapconfig["type"] - internal_data_type = local_type.strip("[]{}") - if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): - continue - setattr(data, attr, self._deserialize(local_type, value)) - return data - except AttributeError: - return - - response, class_name = self._classify_target(target_obj, data) - - if isinstance(response, str): - return self.deserialize_data(data, response) - if isinstance(response, type) and issubclass(response, Enum): - return self.deserialize_enum(data, response) - - if data is None or data is CoreNull: - return data - try: - attributes = response._attribute_map # type: ignore # pylint: disable=protected-access - d_attrs = {} - for attr, attr_desc in attributes.items(): - # Check empty string. If it's not empty, someone has a real "additionalProperties"... - if attr == "additional_properties" and attr_desc["key"] == "": - continue - raw_value = None - # Enhance attr_desc with some dynamic data - attr_desc = attr_desc.copy() # Do a copy, do not change the real one - internal_data_type = attr_desc["type"].strip("[]{}") - if internal_data_type in self.dependencies: - attr_desc["internalType"] = self.dependencies[internal_data_type] - - for key_extractor in self.key_extractors: - found_value = key_extractor(attr, attr_desc, data) - if found_value is not None: - if raw_value is not None and raw_value != found_value: - msg = ( - "Ignoring extracted value '%s' from %s for key '%s'" - " (duplicate extraction, follow extractors order)" - ) - _LOGGER.warning(msg, found_value, key_extractor, attr) - continue - raw_value = found_value - - value = self.deserialize_data(raw_value, attr_desc["type"]) - d_attrs[attr] = value - except (AttributeError, TypeError, KeyError) as err: - msg = "Unable to deserialize to object: " + class_name # type: ignore - raise DeserializationError(msg) from err - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) - - def _build_additional_properties(self, attribute_map, data): - if not self.additional_properties_detection: - return None - if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": - # Check empty string. If it's not empty, someone has a real "additionalProperties" - return None - if isinstance(data, ET.Element): - data = {el.tag: el.text for el in data} - - known_keys = { - _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) - for desc in attribute_map.values() - if desc["key"] != "" - } - present_keys = set(data.keys()) - missing_keys = present_keys - known_keys - return {key: data[key] for key in missing_keys} - - def _classify_target(self, target, data): - """Check to see whether the deserialization target object can - be classified into a subclass. - Once classification has been determined, initialize object. - - :param str target: The target object type to deserialize to. - :param str/dict data: The response data to deserialize. - :return: The classified target object and its class name. 
-        :rtype: tuple
-        """
-        if target is None:
-            return None, None
-
-        if isinstance(target, str):
-            try:
-                target = self.dependencies[target]
-            except KeyError:
-                return target, target
-
-        try:
-            target = target._classify(data, self.dependencies)  # type: ignore # pylint: disable=protected-access
-        except AttributeError:
-            pass  # Target is not a Model, no classify
-        return target, target.__class__.__name__  # type: ignore
-
-    def failsafe_deserialize(self, target_obj, data, content_type=None):
-        """Ignores any errors encountered in deserialization,
-        and falls back to not deserializing the object. Recommended
-        for use in error deserialization, as we want to return the
-        HttpResponseError to users, and not have them deal with
-        a deserialization error.
-
-        :param str target_obj: The target object type to deserialize to.
-        :param str/dict data: The response data to deserialize.
-        :param str content_type: Swagger "produces" if available.
-        :return: Deserialized object.
-        :rtype: object
-        """
-        try:
-            return self(target_obj, data, content_type=content_type)
-        except:  # pylint: disable=bare-except
-            _LOGGER.debug(
-                "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True
-            )
-            return None
-
-    @staticmethod
-    def _unpack_content(raw_data, content_type=None):
-        """Extract the correct structure for deserialization.
-
-        If raw_data is a PipelineResponse, try to extract the result of RawDeserializer.
-        If we can't, raise. Your Pipeline should have a RawDeserializer.
-
-        If not a pipeline response and raw_data is bytes or string, use content-type
-        to decode it. If no content-type, try JSON.
-
-        If raw_data is something else, bypass all logic and return it directly.
-
-        :param obj raw_data: Data to be processed.
-        :param str content_type: How to parse if raw_data is a string/bytes.
-        :raises JSONDecodeError: If JSON is requested and parsing is impossible.
-        :raises UnicodeDecodeError: If bytes is not UTF-8 encoded.
-        :rtype: object
-        :return: Unpacked content.
-        """
-        # Assume this is enough to detect a Pipeline Response without importing it
-        context = getattr(raw_data, "context", {})
-        if context:
-            if RawDeserializer.CONTEXT_NAME in context:
-                return context[RawDeserializer.CONTEXT_NAME]
-            raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize")
-
-        # Assume this is enough to recognize universal_http.ClientResponse without importing it
-        if hasattr(raw_data, "body"):
-            return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers)
-
-        # Assume this is enough to recognize requests.Response without importing it.
-        if hasattr(raw_data, "_content_consumed"):
-            return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers)
-
-        if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"):
-            return RawDeserializer.deserialize_from_text(raw_data, content_type)  # type: ignore
-        return raw_data
-
-    def _instantiate_model(self, response, attrs, additional_properties=None):
-        """Instantiate a response model passing in deserialized args.
-
-        :param Response response: The response model class.
-        :param dict attrs: The deserialized response attributes.
-        :param dict additional_properties: Additional properties to be set.
-        :rtype: Response
-        :return: The instantiated response model.
- """ - if callable(response): - subtype = getattr(response, "_subtype_map", {}) - try: - readonly = [ - k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access - ] - const = [ - k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access - ] - kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} - response_obj = response(**kwargs) - for attr in readonly: - setattr(response_obj, attr, attrs.get(attr)) - if additional_properties: - response_obj.additional_properties = additional_properties - return response_obj - except TypeError as err: - msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore - raise DeserializationError(msg + str(err)) from err - else: - try: - for attr, value in attrs.items(): - setattr(response, attr, value) - return response - except Exception as exp: - msg = "Unable to populate response model. " - msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) from exp - - def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements - """Process data for deserialization according to data type. - - :param str data: The response string to be deserialized. - :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - if data is None: - return data - - try: - if not data_type: - return data - if data_type in self.basic_types.values(): - return self.deserialize_basic(data, data_type) - if data_type in self.deserialize_type: - if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): - return data - - is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment - "object", - "[]", - r"{}", - ] - if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: - return None - data_val = self.deserialize_type[data_type](data) - return data_val - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.deserialize_type: - return self.deserialize_type[iter_type](data, data_type[1:-1]) - - obj_type = self.dependencies[data_type] - if issubclass(obj_type, Enum): - if isinstance(data, ET.Element): - data = data.text - return self.deserialize_enum(data, obj_type) - - except (ValueError, TypeError, AttributeError) as err: - msg = "Unable to deserialize response data." - msg += " Data: {}, {}".format(data, data_type) - raise DeserializationError(msg) from err - return self._deserialize(obj_type, data) - - def deserialize_iter(self, attr, iter_type): - """Deserialize an iterable. - - :param list attr: Iterable to be deserialized. - :param str iter_type: The type of object in the iterable. - :return: Deserialized iterable. - :rtype: list - """ - if attr is None: - return None - if isinstance(attr, ET.Element): # If I receive an element here, get the children - attr = list(attr) - if not isinstance(attr, (list, set)): - raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) - return [self.deserialize_data(a, iter_type) for a in attr] - - def deserialize_dict(self, attr, dict_type): - """Deserialize a dictionary. - - :param dict/list attr: Dictionary to be deserialized. Also accepts - a list of key, value pairs. - :param str dict_type: The object type of the items in the dictionary. - :return: Deserialized dictionary. 
-        :rtype: dict
-        """
-        if isinstance(attr, list):
-            return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr}
-
-        if isinstance(attr, ET.Element):
-            # Transform value into {"Key": "value"}
-            attr = {el.tag: el.text for el in attr}
-        return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()}
-
-    def deserialize_object(self, attr, **kwargs):  # pylint: disable=too-many-return-statements
-        """Deserialize a generic object.
-        This will be handled as a dictionary.
-
-        :param dict attr: Dictionary to be deserialized.
-        :return: Deserialized object.
-        :rtype: dict
-        :raises: TypeError if non-builtin datatype encountered.
-        """
-        if attr is None:
-            return None
-        if isinstance(attr, ET.Element):
-            # Do not recurse on XML, just return the tree as-is
-            return attr
-        if isinstance(attr, str):
-            return self.deserialize_basic(attr, "str")
-        obj_type = type(attr)
-        if obj_type in self.basic_types:
-            return self.deserialize_basic(attr, self.basic_types[obj_type])
-        if obj_type is _long_type:
-            return self.deserialize_long(attr)
-
-        if obj_type == dict:
-            deserialized = {}
-            for key, value in attr.items():
-                try:
-                    deserialized[key] = self.deserialize_object(value, **kwargs)
-                except ValueError:
-                    deserialized[key] = None
-            return deserialized
-
-        if obj_type == list:
-            deserialized = []
-            for obj in attr:
-                try:
-                    deserialized.append(self.deserialize_object(obj, **kwargs))
-                except ValueError:
-                    pass
-            return deserialized
-
-        error = "Cannot deserialize generic object with type: "
-        raise TypeError(error + str(obj_type))
-
-    def deserialize_basic(self, attr, data_type):  # pylint: disable=too-many-return-statements
-        """Deserialize basic builtin data type from string.
-        Will attempt to convert to str, int, float and bool.
-        This function will also accept '1', '0', 'true' and 'false' as
-        valid bool values.
-
-        :param str attr: response string to be deserialized.
-        :param str data_type: deserialization data type.
-        :return: Deserialized basic type.
-        :rtype: str, int, float or bool
-        :raises: TypeError if string format is not valid.
-        """
-        # If we're here, data is supposed to be a basic type.
-        # If it's still an XML node, take the text
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        if not attr:
-            if data_type == "str":
-                # None or '', node is empty string.
-                return ""
-            # None or '', node with a strong type is None.
-            # Don't try to model "empty bool" or "empty int"
-            return None
-
-        if data_type == "bool":
-            if attr in [True, False, 1, 0]:
-                return bool(attr)
-            if isinstance(attr, str):
-                if attr.lower() in ["true", "1"]:
-                    return True
-                if attr.lower() in ["false", "0"]:
-                    return False
-            raise TypeError("Invalid boolean value: {}".format(attr))
-
-        if data_type == "str":
-            return self.deserialize_unicode(attr)
-        return eval(data_type)(attr)  # nosec # pylint: disable=eval-used
-
-    @staticmethod
-    def deserialize_unicode(data):
-        """Preserve unicode objects in Python 2, otherwise return data
-        as a string.
-
-        :param str data: response string to be deserialized.
-        :return: Deserialized string.
-        :rtype: str or unicode
-        """
-        # We might be here because we have an enum modeled as string,
-        # and we try to deserialize a partial dict with enum inside
-        if isinstance(data, Enum):
-            return data
-
-        # Consider this a real string
-        try:
-            if isinstance(data, unicode):  # type: ignore
-                return data
-        except NameError:
-            return str(data)
-        return str(data)
-
-    @staticmethod
-    def deserialize_enum(data, enum_obj):
-        """Deserialize string into enum object.
-
-        If the string is not a valid enum value it will be returned as-is
-        and a warning will be logged.
-
-        :param str data: Response string to be deserialized. If this value is
-         None or invalid it will be returned as-is.
-        :param Enum enum_obj: Enum object to deserialize to.
-        :return: Deserialized enum object.
-        :rtype: Enum
-        """
-        if isinstance(data, enum_obj) or data is None:
-            return data
-        if isinstance(data, Enum):
-            data = data.value
-        if isinstance(data, int):
-            # Workaround. We might consider removing it in the future.
-            try:
-                return list(enum_obj.__members__.values())[data]
-            except IndexError as exc:
-                error = "{!r} is not a valid index for enum {!r}"
-                raise DeserializationError(error.format(data, enum_obj)) from exc
-        try:
-            return enum_obj(str(data))
-        except ValueError:
-            for enum_value in enum_obj:
-                if enum_value.value.lower() == str(data).lower():
-                    return enum_value
-            # We no longer fail for an unknown value; we deserialize it as a string
-            _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj)
-            return Deserializer.deserialize_unicode(data)
-
-    @staticmethod
-    def deserialize_bytearray(attr):
-        """Deserialize string into bytearray.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized bytearray
-        :rtype: bytearray
-        :raises: TypeError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        return bytearray(b64decode(attr))  # type: ignore
-
-    @staticmethod
-    def deserialize_base64(attr):
-        """Deserialize base64 encoded string into bytes.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized base64-decoded bytes
-        :rtype: bytes
-        :raises: TypeError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        padding = "=" * (3 - (len(attr) + 3) % 4)  # type: ignore
-        attr = attr + padding  # type: ignore
-        encoded = attr.replace("-", "+").replace("_", "/")
-        return b64decode(encoded)
-
-    @staticmethod
-    def deserialize_decimal(attr):
-        """Deserialize string into Decimal object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized decimal
-        :raises: DeserializationError if string format invalid.
-        :rtype: decimal
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        try:
-            return decimal.Decimal(str(attr))  # type: ignore
-        except decimal.DecimalException as err:
-            msg = "Invalid decimal {}".format(attr)
-            raise DeserializationError(msg) from err
-
-    @staticmethod
-    def deserialize_long(attr):
-        """Deserialize string into long (Py2) or int (Py3).
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized int
-        :rtype: long or int
-        :raises: ValueError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        return _long_type(attr)  # type: ignore
-
-    @staticmethod
-    def deserialize_duration(attr):
-        """Deserialize ISO-8601 formatted string into TimeDelta object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized duration
-        :rtype: TimeDelta
-        :raises: DeserializationError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        try:
-            duration = isodate.parse_duration(attr)
-        except (ValueError, OverflowError, AttributeError) as err:
-            msg = "Cannot deserialize duration object."
-            raise DeserializationError(msg) from err
-        return duration
-
-    @staticmethod
-    def deserialize_date(attr):
-        """Deserialize ISO-8601 formatted string into Date object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized date
-        :rtype: Date
-        :raises: DeserializationError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
-            raise DeserializationError("Date must have only digits and -. Received: %s" % attr)
-        # This must NOT use defaultmonth/defaultday. Using None ensures this raises an exception.
-        return isodate.parse_date(attr, defaultmonth=0, defaultday=0)
-
-    @staticmethod
-    def deserialize_time(attr):
-        """Deserialize ISO-8601 formatted string into time object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized time
-        :rtype: datetime.time
-        :raises: DeserializationError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        if re.search(r"[^\W\d_]", attr, re.I + re.U):  # type: ignore
-            raise DeserializationError("Time must have only digits and :. Received: %s" % attr)
-        return isodate.parse_time(attr)
-
-    @staticmethod
-    def deserialize_rfc(attr):
-        """Deserialize RFC-1123 formatted string into Datetime object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized RFC datetime
-        :rtype: Datetime
-        :raises: DeserializationError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        try:
-            parsed_date = email.utils.parsedate_tz(attr)  # type: ignore
-            date_obj = datetime.datetime(
-                *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60))
-            )
-            if not date_obj.tzinfo:
-                date_obj = date_obj.astimezone(tz=TZ_UTC)
-        except ValueError as err:
-            msg = "Cannot deserialize to rfc datetime object."
-            raise DeserializationError(msg) from err
-        return date_obj
-
-    @staticmethod
-    def deserialize_iso(attr):
-        """Deserialize ISO-8601 formatted string into Datetime object.
-
-        :param str attr: response string to be deserialized.
-        :return: Deserialized ISO datetime
-        :rtype: Datetime
-        :raises: DeserializationError if string format invalid.
-        """
-        if isinstance(attr, ET.Element):
-            attr = attr.text
-        try:
-            attr = attr.upper()  # type: ignore
-            match = Deserializer.valid_date.match(attr)
-            if not match:
-                raise ValueError("Invalid datetime string: " + attr)
-
-            check_decimal = attr.split(".")
-            if len(check_decimal) > 1:
-                decimal_str = ""
-                for digit in check_decimal[1]:
-                    if digit.isdigit():
-                        decimal_str += digit
-                    else:
-                        break
-                if len(decimal_str) > 6:
-                    attr = attr.replace(decimal_str, decimal_str[0:6])
-
-            date_obj = isodate.parse_datetime(attr)
-            test_utc = date_obj.utctimetuple()
-            if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
-                raise OverflowError("Hit max or min date")
-        except (ValueError, OverflowError, AttributeError) as err:
-            msg = "Cannot deserialize datetime object."
-            raise DeserializationError(msg) from err
-        return date_obj
-
-    @staticmethod
-    def deserialize_unix(attr):
-        """Deserialize a Unix timestamp, given in seconds since the epoch, into a Datetime object.
-
-        :param int attr: Unix time, in seconds, to be deserialized.
-        :return: Deserialized datetime
-        :rtype: Datetime
-        :raises: DeserializationError if format invalid
-        """
-        if isinstance(attr, ET.Element):
-            attr = int(attr.text)  # type: ignore
-        try:
-            attr = int(attr)
-            date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC)
-        except ValueError as err:
-            msg = "Cannot deserialize to unix datetime object."
- raise DeserializationError(msg) from err - return date_obj diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_types.py b/sdk/ai/azure-ai-project/azure/ai/project/_types.py deleted file mode 100644 index c438829bda41..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_types.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING, Union - -if TYPE_CHECKING: - from . import models as _models - from .. import models as _models -AgentsApiResponseFormatOption = Union[ - str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" -] -MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] -AgentsApiToolChoiceOption = Union[str, str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py b/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py deleted file mode 100644 index e6f010934827..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_vendor.py +++ /dev/null @@ -1,50 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
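For context, here is a minimal usage sketch of the `Deserializer` type-table dispatch removed above. `_serialization` is a private module of the deleted package, so the import is illustrative only, not a supported API:

```python
# Illustrative only: _serialization is a private module of the package.
from azure.ai.project._serialization import Deserializer

deserializer = Deserializer()

# "iso-8601" and "unix-time" are keys registered in deserialize_type in __init__.
dt_iso = deserializer.deserialize_data("2024-09-25T11:28:30Z", "iso-8601")
dt_unix = deserializer.deserialize_data(1727263710, "unix-time")

# "[int]" routes through deserialize_iter; "{bool}" routes through deserialize_dict.
numbers = deserializer.deserialize_data(["1", "2", "3"], "[int]")
flags = deserializer.deserialize_data({"a": "true", "b": "0"}, "{bool}")

print(dt_iso.tzinfo, dt_unix.tzinfo)  # both datetimes are UTC-aware
print(numbers, flags)                 # [1, 2, 3] {'a': True, 'b': False}
```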
-# -------------------------------------------------------------------------- - -import json -from typing import Any, Dict, IO, List, Mapping, Optional, Tuple, Union - -from ._model_base import Model, SdkJSONEncoder - - -# file-like tuple could be `(filename, IO (or bytes))` or `(filename, IO (or bytes), content_type)` -FileContent = Union[str, bytes, IO[str], IO[bytes]] - -FileType = Union[ - # file (or bytes) - FileContent, - # (filename, file (or bytes)) - Tuple[Optional[str], FileContent], - # (filename, file (or bytes), content_type) - Tuple[Optional[str], FileContent, Optional[str]], -] - - -def serialize_multipart_data_entry(data_entry: Any) -> Any: - if isinstance(data_entry, (list, tuple, dict, Model)): - return json.dumps(data_entry, cls=SdkJSONEncoder, exclude_readonly=True) - return data_entry - - -def prepare_multipart_form_data( - body: Mapping[str, Any], multipart_fields: List[str], data_fields: List[str] -) -> Tuple[List[FileType], Dict[str, Any]]: - files: List[FileType] = [] - data: Dict[str, Any] = {} - for multipart_field in multipart_fields: - multipart_entry = body.get(multipart_field) - if isinstance(multipart_entry, list): - files.extend([(multipart_field, e) for e in multipart_entry]) - elif multipart_entry: - files.append((multipart_field, multipart_entry)) - - for data_field in data_fields: - data_entry = body.get(data_field) - if data_entry: - data[data_field] = serialize_multipart_data_entry(data_entry) - - return files, data diff --git a/sdk/ai/azure-ai-project/azure/ai/project/_version.py b/sdk/ai/azure-ai-project/azure/ai/project/_version.py deleted file mode 100644 index be71c81bd282..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -VERSION = "1.0.0b1" diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py deleted file mode 100644 index d5beb6bf7f83..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
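The `prepare_multipart_form_data` helper deleted above splits a request body into file entries and JSON-serialized form fields. A small sketch, assuming the private module path of this package; the field names below are hypothetical, not taken from the patch:

```python
# Illustrative only: _vendor is a private module of the package.
from azure.ai.project._vendor import prepare_multipart_form_data

body = {
    "file": ("notes.txt", b"hello world"),  # file-like tuple: (filename, bytes)
    "purpose": "assistants",                # plain form field
}

files, data = prepare_multipart_form_data(body, multipart_fields=["file"], data_fields=["purpose"])

assert files == [("file", ("notes.txt", b"hello world"))]
assert data == {"purpose": "assistants"}
```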
-# --------------------------------------------------------------------------
-# pylint: disable=wrong-import-position
-
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from ._patch import *  # pylint: disable=unused-wildcard-import
-
-from ._client import AIProjectClient  # type: ignore
-
-try:
-    from ._patch import __all__ as _patch_all
-    from ._patch import *
-except ImportError:
-    _patch_all = []
-from ._patch import patch_sdk as _patch_sdk
-
-__all__ = [
-    "AIProjectClient",
-]
-__all__.extend([p for p in _patch_all if p not in __all__])  # pyright: ignore
-
-_patch_sdk()
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py
deleted file mode 100644
index f0d74b0d7477..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/project/aio/_client.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from copy import deepcopy
-from typing import Any, Awaitable, TYPE_CHECKING
-from typing_extensions import Self
-
-from azure.core import AsyncPipelineClient
-from azure.core.pipeline import policies
-from azure.core.rest import AsyncHttpResponse, HttpRequest
-
-from .._serialization import Deserializer, Serializer
-from ._configuration import AIProjectClientConfiguration
-from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
-
-if TYPE_CHECKING:
-    from azure.core.credentials_async import AsyncTokenCredential
-
-
-class AIProjectClient:
-    """AIProjectClient.
-
-    :ivar agents: AgentsOperations operations
-    :vartype agents: azure.ai.project.aio.operations.AgentsOperations
-    :ivar connections: ConnectionsOperations operations
-    :vartype connections: azure.ai.project.aio.operations.ConnectionsOperations
-    :ivar evaluations: EvaluationsOperations operations
-    :vartype evaluations: azure.ai.project.aio.operations.EvaluationsOperations
-    :param endpoint: The Azure AI Studio project endpoint, in the form
-     ``https://<region>.api.azureml.ms`` or
-     ``https://<private-link-guid>.<region>.api.azureml.ms``, where
-     :code:`<region>` is the Azure region where the project is deployed (e.g. westus) and
-     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
-    :type endpoint: str
-    :param subscription_id: The Azure subscription ID. Required.
-    :type subscription_id: str
-    :param resource_group_name: The name of the Azure Resource Group. Required.
-    :type resource_group_name: str
-    :param project_name: The Azure AI Studio project name. Required.
-    :type project_name: str
-    :param credential: Credential used to authenticate requests to the service. Required.
-    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
-    :keyword api_version: The API version to use for this operation. Default value is
-     "2024-07-01-preview". Note that overriding this default value may result in unsupported
-     behavior.
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/agents/v1.0/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{projectName}" # pylint: disable=line-too-long - self._config = AIProjectClientConfiguration( - endpoint=endpoint, - subscription_id=subscription_id, - resource_group_name=resource_group_name, - project_name=project_name, - credential=credential, - **kwargs - ) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - self._serialize = Serializer() - self._deserialize = Deserializer() - self._serialize.client_side_validation = False - self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) - self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) - - def send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client.send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
-        :rtype: ~azure.core.rest.AsyncHttpResponse
-        """
-
-        request_copy = deepcopy(request)
-        path_format_arguments = {
-            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"),
-            "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"),
-            "resourceGroupName": self._serialize.url(
-                "self._config.resource_group_name", self._config.resource_group_name, "str"
-            ),
-            "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"),
-        }
-
-        request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
-        return self._client.send_request(request_copy, stream=stream, **kwargs)  # type: ignore
-
-    async def close(self) -> None:
-        await self._client.close()
-
-    async def __aenter__(self) -> Self:
-        await self._client.__aenter__()
-        return self
-
-    async def __aexit__(self, *exc_details: Any) -> None:
-        await self._client.__aexit__(*exc_details)
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py
deleted file mode 100644
index 0785b42c4c94..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/project/aio/_configuration.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-
-from typing import Any, TYPE_CHECKING
-
-from azure.core.pipeline import policies
-
-from .._version import VERSION
-
-if TYPE_CHECKING:
-    from azure.core.credentials_async import AsyncTokenCredential
-
-
-class AIProjectClientConfiguration:  # pylint: disable=too-many-instance-attributes
-    """Configuration for AIProjectClient.
-
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param endpoint: The Azure AI Studio project endpoint, in the form
-     ``https://<region>.api.azureml.ms`` or
-     ``https://<private-link-guid>.<region>.api.azureml.ms``, where :code:`<region>`
-     is the Azure region where the project is deployed (e.g. westus) and
-     :code:`<private-link-guid>` is the GUID of the Enterprise private link. Required.
-    :type endpoint: str
-    :param subscription_id: The Azure subscription ID. Required.
-    :type subscription_id: str
-    :param resource_group_name: The name of the Azure Resource Group. Required.
-    :type resource_group_name: str
-    :param project_name: The Azure AI Studio project name. Required.
-    :type project_name: str
-    :param credential: Credential used to authenticate requests to the service. Required.
-    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
-    :keyword api_version: The API version to use for this operation. Default value is
-     "2024-07-01-preview". Note that overriding this default value may result in unsupported
-     behavior.
- :paramtype api_version: str - """ - - def __init__( - self, - endpoint: str, - subscription_id: str, - resource_group_name: str, - project_name: str, - credential: "AsyncTokenCredential", - **kwargs: Any - ) -> None: - api_version: str = kwargs.pop("api_version", "2024-07-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if subscription_id is None: - raise ValueError("Parameter 'subscription_id' must not be None.") - if resource_group_name is None: - raise ValueError("Parameter 'resource_group_name' must not be None.") - if project_name is None: - raise ValueError("Parameter 'project_name' must not be None.") - if credential is None: - raise ValueError("Parameter 'credential' must not be None.") - - self.endpoint = endpoint - self.subscription_id = subscription_id - self.resource_group_name = resource_group_name - self.project_name = project_name - self.credential = credential - self.api_version = api_version - self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) - kwargs.setdefault("sdk_moniker", "ai-project/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") - if self.credential and not self.authentication_policy: - self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy( - self.credential, *self.credential_scopes, **kwargs - ) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py deleted file mode 100644 index d1a7e6d84569..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/_patch.py +++ /dev/null @@ -1,200 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. 
-
-Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
-"""
-from typing import List, Any, TYPE_CHECKING
-from azure.core import AsyncPipelineClient
-from azure.core.pipeline import policies
-from typing_extensions import Self
-
-from .._serialization import Deserializer, Serializer
-from ._configuration import AIProjectClientConfiguration
-from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations
-from ._client import AIProjectClient as ClientGenerated
-from .operations._patch import InferenceOperations
-
-if TYPE_CHECKING:
-    from azure.core.credentials_async import AsyncTokenCredential
-
-
-class AIProjectClient(ClientGenerated):
-
-    def __init__(
-        self,
-        endpoint: str,
-        subscription_id: str,
-        resource_group_name: str,
-        project_name: str,
-        credential: "AsyncTokenCredential",
-        **kwargs: Any,
-    ) -> None:
-        # TODO: Validate input formats with regex match (e.g. subscription ID)
-        if not endpoint:
-            raise ValueError("endpoint is required")
-        if not subscription_id:
-            raise ValueError("subscription_id is required")
-        if not resource_group_name:
-            raise ValueError("resource_group_name is required")
-        if not project_name:
-            raise ValueError("project_name is required")
-        if not credential:
-            raise ValueError("credential is required")
-        if "api_version" in kwargs:
-            raise ValueError("No support for overriding the API version")
-        if "credential_scopes" in kwargs:
-            raise ValueError("No support for overriding the credential scopes")
-
-        kwargs1 = kwargs.copy()
-        kwargs2 = kwargs.copy()
-        kwargs3 = kwargs.copy()
-
-        # For Endpoints operations (enumerating connections, getting SAS tokens)
-        _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
-        self._config1 = AIProjectClientConfiguration(
-            endpoint=endpoint,
-            subscription_id=subscription_id,
-            resource_group_name=resource_group_name,
-            project_name=project_name,
-            credential=credential,
-            api_version="2024-07-01-preview",
-            credential_scopes=["https://management.azure.com"],
-            **kwargs1,
-        )
-        _policies1 = kwargs1.pop("policies", None)
-        if _policies1 is None:
-            _policies1 = [
-                policies.RequestIdPolicy(**kwargs1),
-                self._config1.headers_policy,
-                self._config1.user_agent_policy,
-                self._config1.proxy_policy,
-                policies.ContentDecodePolicy(**kwargs1),
-                self._config1.redirect_policy,
-                self._config1.retry_policy,
-                self._config1.authentication_policy,
-                self._config1.custom_hook_policy,
-                self._config1.logging_policy,
-                policies.DistributedTracingPolicy(**kwargs1),
-                policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None,
-                self._config1.http_logging_policy,
-            ]
-        self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1)
-
-        # For Agents operations
-        _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
-        self._config2 = AIProjectClientConfiguration(
-            endpoint=endpoint,
-            subscription_id=subscription_id,
-            resource_group_name=resource_group_name,
-            project_name=project_name,
-            credential=credential,
-            api_version="2024-07-01-preview",  # TODO: Update me
-            credential_scopes=["https://ml.azure.com"],
-            **kwargs2,
-        )
-        _policies2 = kwargs2.pop("policies", None)
-        if _policies2 is None:
-            _policies2 = [
-                policies.RequestIdPolicy(**kwargs2),
-                self._config2.headers_policy,
-                self._config2.user_agent_policy,
-                self._config2.proxy_policy,
-                policies.ContentDecodePolicy(**kwargs2),
-                self._config2.redirect_policy,
-                self._config2.retry_policy,
-                self._config2.authentication_policy,
-                self._config2.custom_hook_policy,
-                self._config2.logging_policy,
-                policies.DistributedTracingPolicy(**kwargs2),
-                policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None,
-                self._config2.http_logging_policy,
-            ]
-        self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2)
-
-        # For Cloud Evaluations operations
-        _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}"  # pylint: disable=line-too-long
-        self._config3 = AIProjectClientConfiguration(
-            endpoint=endpoint,
-            subscription_id=subscription_id,
-            resource_group_name=resource_group_name,
-            project_name=project_name,
-            credential=credential,
-            api_version="2024-07-01-preview",  # TODO: Update me
-            credential_scopes=["https://management.azure.com"],  # TODO: Update once service changes are ready
-            **kwargs3,
-        )
-        _policies3 = kwargs3.pop("policies", None)
-        if _policies3 is None:
-            _policies3 = [
-                policies.RequestIdPolicy(**kwargs3),
-                self._config3.headers_policy,
-                self._config3.user_agent_policy,
-                self._config3.proxy_policy,
-                policies.ContentDecodePolicy(**kwargs3),
-                self._config3.redirect_policy,
-                self._config3.retry_policy,
-                self._config3.authentication_policy,
-                self._config3.custom_hook_policy,
-                self._config3.logging_policy,
-                policies.DistributedTracingPolicy(**kwargs3),
-                policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None,
-                self._config3.http_logging_policy,
-            ]
-        self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3)
-
-        self._serialize = Serializer()
-        self._deserialize = Deserializer()
-        self._serialize.client_side_validation = False
-
-        self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize)
-        self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize)
-        self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize)
-        self.inference = InferenceOperations(self)
-
-    async def close(self) -> None:
-        await self._client1.close()
-        await self._client2.close()
-        await self._client3.close()
-
-    async def __aenter__(self) -> Self:
-        await self._client1.__aenter__()
-        await self._client2.__aenter__()
-        await self._client3.__aenter__()
-        return self
-
-    async def __aexit__(self, *exc_details: Any) -> None:
-        await self._client1.__aexit__(*exc_details)
-        await self._client2.__aexit__(*exc_details)
-        await self._client3.__aexit__(*exc_details)
-
-    @classmethod
-    def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AIProjectClient":
-        """
-        Create an asynchronous AIProjectClient from a connection string.
-
-        :param conn_str: The connection string, copied from your AI Studio project.
-        :type conn_str: str
-        :param credential: Credential used to authenticate requests to the client. Required.
-        :type credential: ~azure.core.credentials_async.AsyncTokenCredential
-        :return: An AIProjectClient instance.
-        :rtype: AIProjectClient
- """ - if not conn_str: - raise ValueError("Connection string is required") - parts = conn_str.split(";") - if len(parts) != 4: - raise ValueError("Invalid connection string format") - endpoint = "https://" + parts[0] - subscription_id = parts[1] - resource_group_name = parts[2] - project_name = parts[3] - return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - - -__all__: List[str] = [ - "AIProjectClient", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py deleted file mode 100644 index 35cf92df96bc..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._operations import AgentsOperations # type: ignore -from ._operations import ConnectionsOperations # type: ignore -from ._operations import EvaluationsOperations # type: ignore - -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AgentsOperations", - "ConnectionsOperations", - "EvaluationsOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py deleted file mode 100644 index 6fb61ba111ef..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_operations.py +++ /dev/null @@ -1,6049 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from io import IOBase -import json -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, List, Optional, TYPE_CHECKING, TypeVar, Union, overload -import urllib.parse - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - StreamClosedError, - StreamConsumedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import _model_base, models as _models -from ..._model_base import SdkJSONEncoder, _deserialize -from ..._vendor import FileType, prepare_multipart_form_data -from ...operations._operations import ( - build_agents_cancel_run_request, - build_agents_cancel_vector_store_file_batch_request, - build_agents_create_agent_request, - build_agents_create_message_request, - build_agents_create_run_request, - build_agents_create_thread_and_run_request, - build_agents_create_thread_request, - build_agents_create_vector_store_file_batch_request, - build_agents_create_vector_store_file_request, - build_agents_create_vector_store_request, - build_agents_delete_agent_request, - build_agents_delete_file_request, - build_agents_delete_thread_request, - build_agents_delete_vector_store_file_request, - build_agents_delete_vector_store_request, - build_agents_get_agent_request, - build_agents_get_file_content_request, - build_agents_get_file_request, - build_agents_get_message_request, - build_agents_get_run_request, - build_agents_get_run_step_request, - build_agents_get_thread_request, - build_agents_get_vector_store_file_batch_request, - build_agents_get_vector_store_file_request, - build_agents_get_vector_store_request, - build_agents_list_agents_request, - build_agents_list_files_request, - build_agents_list_messages_request, - build_agents_list_run_steps_request, - build_agents_list_runs_request, - build_agents_list_vector_store_file_batch_files_request, - build_agents_list_vector_store_files_request, - build_agents_list_vector_stores_request, - build_agents_modify_vector_store_request, - build_agents_submit_tool_outputs_to_run_request, - build_agents_update_agent_request, - build_agents_update_message_request, - build_agents_update_run_request, - build_agents_update_thread_request, - build_agents_upload_file_request, - build_connections_get_request, - build_connections_list_request, - build_connections_list_secrets_request, - build_evaluations_create_or_replace_schedule_request, - build_evaluations_create_request, - build_evaluations_delete_schedule_request, - build_evaluations_get_request, - build_evaluations_get_schedule_request, - build_evaluations_list_request, - build_evaluations_list_schedule_request, - build_evaluations_update_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore - -if TYPE_CHECKING: - from ... 
import _types -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class AgentsOperations: # pylint: disable=too-many-public-methods - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.aio.AIProjectClient`'s - :attr:`agents` attribute. - """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. 
- :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - if model is _Unset: - raise TypeError("missing required argument: model") - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_agent_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if 
response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_agents( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfAgent. 
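For illustration of the ``create_agent`` overloads above: a minimal usage sketch. The async client class name, import path, and constructor arguments below are assumptions (this hunk only shows that the configuration carries an endpoint, subscription ID, resource group, and project name); the ``create_agent`` keyword arguments come from the docstrings. Following the docstring's recommendation, it sets ``temperature`` and leaves ``top_p`` alone.

```python
# Sketch only: AIProjectClient and its import path are hypothetical stand-ins
# for the real client class, which this patch does not show.
import asyncio

from azure.identity.aio import DefaultAzureCredential


async def main() -> None:
    from azure.ai.project.aio import AIProjectClient  # hypothetical import path

    async with DefaultAzureCredential() as credential:
        async with AIProjectClient(
            endpoint="https://<your-endpoint>",  # placeholders
            subscription_id="<subscription-id>",
            resource_group_name="<resource-group>",
            project_name="<project-name>",
            credential=credential,
        ) as client:
            # Alter temperature *or* top_p, not both, per the docstring.
            agent = await client.agents.create_agent(
                model="gpt-4o",
                name="my-agent",
                instructions="You are a helpful assistant.",
                temperature=0.2,
            )
            print(agent.id)


asyncio.run(main())
```

The later sketches in this file reuse the same assumed ``client``, ``agent``, and related variables rather than repeating this setup.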
The OpenAIPageableListOfAgent is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfAgent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) - - _request = build_agents_list_agents_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: Agent. 
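The ``after`` cursor described above supports forward paging. A sketch, assuming the returned ``OpenAIPageableListOfAgent`` exposes OpenAI-style ``data``, ``last_id``, and ``has_more`` fields (assumed names; only the model's type name appears in this hunk):

```python
# Walk all agents 20 at a time using the `after` cursor from the docstring.
after = None
while True:
    page = await client.agents.list_agents(limit=20, order="asc", after=after)
    for agent_item in page.data:  # `data` is an assumed field name
        print(agent_item.id)
    if not page.has_more:         # `has_more` assumed
        break
    after = page.last_id          # resume after the last object returned
```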
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - _request = build_agents_get_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
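Retrieving a single agent is a direct lookup by ID, continuing the running sketch:

```python
# Fetch an existing agent by the identifier returned at creation time.
agent = await client.agents.get_agent(assistant_id=agent.id)
print(agent.name)
```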
- :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_agent( - self, - assistant_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_agent_request( - assistant_id=assistant_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. 
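Because the implementation above strips ``None`` values from the request body, ``update_agent`` behaves as a partial update; a sketch continuing the running example:

```python
# Only the supplied fields are sent; everything else on the agent is untouched.
agent = await client.agents.update_agent(
    assistant_id=agent.id,
    instructions="Answer concisely, in one paragraph.",
    metadata={"owner": "docs-sample"},
)
```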
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread( - self, - *, - content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. 
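``delete_agent`` returns an ``AgentDeletionStatus``, and the ``error_map`` used throughout these operations converts a 404 into ``ResourceNotFoundError``; a sketch (the ``deleted`` field name is assumed from the OpenAI-style deletion-status shape):

```python
from azure.core.exceptions import ResourceNotFoundError

try:
    status = await client.agents.delete_agent(assistant_id=agent.id)
    print("deleted:", status.deleted)  # `deleted` is an assumed field name
except ResourceNotFoundError:
    # Raised via the 404 entry in error_map when the agent does not exist.
    pass
```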
The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_thread( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread.
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: - """Gets information about an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: AgentThread. 
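A thread can be created empty or seeded with initial messages; a sketch continuing the running example, passing the message in its JSON form (the typed alternative would be a ``ThreadMessageOptions`` model; whether plain dicts are accepted here is an assumption based on the JSON body overload):

```python
# Seed the thread with one user message; both arguments are optional.
thread = await client.agents.create_thread(
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    metadata={"session": "demo"},
)
print(thread.id)
```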
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - _request = build_agents_get_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_thread( - self, - thread_id: str, - *, - content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. 
Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_thread( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_thread( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread.
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_thread_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: - """Deletes an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: ThreadDeletionStatus. 
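``update_thread`` follows the same partial-update pattern as ``update_agent``; a sketch that only touches metadata:

```python
# tool_resources could be updated the same way; omitted fields are not sent.
thread = await client.agents.update_thread(
    thread_id=thread.id,
    metadata={"stage": "triage"},
)
```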
The ThreadDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_message( - self, - thread_id: str, - *, - role: Union[str, _models.MessageRole], - content: str, - content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. 
Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.project.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. - :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_message( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_message( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.project.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None.
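Per the role semantics above, user-authored input uses ``user``; a sketch continuing the running example:

```python
# role and content are the two required keyword arguments.
message = await client.agents.create_message(
    thread_id=thread.id,
    role="user",
    content="Please summarize the thread so far.",
)
print(message.id)
```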
- :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - if role is _Unset: - raise TypeError("missing required argument: role") - if content is _Unset: - raise TypeError("missing required argument: content") - body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_message_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_messages( - self, - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadMessage: - """Gets a list of 
messages that exist on a thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword run_id: Filter messages by the run ID that generated them. Default value is None. - :paramtype run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible - with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_list_messages_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: 
ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: - """Gets an existing message from an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_get_message_request( - thread_id=thread_id, - message_id=message_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. 
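Listing and point reads compose naturally; a sketch that pages messages oldest-first and re-fetches one (``data`` is the same assumed paging field as in the agent-listing sketch):

```python
# Oldest-first listing; pass run_id=... to restrict to one run's output.
page = await client.agents.list_messages(thread_id=thread.id, order="asc")
for msg in page.data:  # `data` is an assumed field name
    fetched = await client.agents.get_message(thread_id=thread.id, message_id=msg.id)
    print(fetched.id)
```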
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_message( - self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_message( - self, - thread_id: str, - message_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage.
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_message_request( - thread_id=thread_id, - message_id=message_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
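As the overloads above show, ``metadata`` is the only mutable field on a message; a sketch:

```python
# Tag an existing message; no other message fields can be modified here.
message = await client.agents.update_message(
    thread_id=thread.id,
    message_id=message.id,
    metadata={"reviewed": "true"},
)
```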
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether (and which) tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice. Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat. Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_run(
- self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Creates a new run for an agent thread.
-
- :param thread_id: Identifier of the thread. Required.
- :type thread_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun.
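A hedged sketch of the keyword overload of ``create_run`` documented above; all names and values are placeholders, and only keywords listed in this docstring are used.

```python
# Hypothetical sketch of creating and then re-fetching a run.
run = await agents_client.create_run(
    thread_id="thread_abc",
    assistant_id="asst_123",     # required keyword when no body is passed
    temperature=0.2,             # lower values give more deterministic output
    max_prompt_tokens=4000,      # run ends with status 'incomplete' if exceeded
    metadata={"source": "review"},
)
run = await agents_client.get_run(thread_id="thread_abc", run_id=run["id"])
print(run["status"])             # ThreadRun is MutableMapping-compatible
```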
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether (and which) tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice. Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat. Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_run_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_runs( - self, - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadRun: - """Gets a list of runs for a specified thread. - - :param thread_id: Identifier of the thread. Required. 
- :type thread_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_list_runs_request( - thread_id=thread_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Gets an existing run from an existing thread. 
- - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_get_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
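For symmetry with the message example earlier, a minimal sketch of ``update_run``; identifiers and the client name remain placeholders.

```python
# Hypothetical sketch: attach reviewer metadata to an existing run.
run = await agents_client.update_run(
    thread_id="thread_abc",
    run_id="run_123",
    metadata={"triage": "done"},  # up to 16 string key/value pairs
)
```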
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. 
- :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if tool_outputs is _Unset: - raise TypeError("missing required argument: tool_outputs") - body = {"stream": stream_parameter, "tool_outputs": tool_outputs} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_submit_tool_outputs_to_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Cancels a run of an in progress thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. 
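A hedged sketch of the ``requires_action`` flow that the ``submit_tool_outputs_to_run`` docstrings above describe. The shape of ``required_action`` (``submit_tool_outputs.tool_calls``) follows the OpenAI-style contract implied here and is an assumption, as are the dict literals standing in for ``ToolOutput`` models.

```python
# Hypothetical sketch: answer a run that is waiting on tool outputs.
run = await agents_client.get_run(thread_id="thread_abc", run_id="run_123")
if run["status"] == "requires_action" and run["required_action"]["type"] == "submit_tool_outputs":
    outputs = [
        {"tool_call_id": call["id"], "output": "42"}  # stand-in tool results
        for call in run["required_action"]["submit_tool_outputs"]["tool_calls"]
    ]
    run = await agents_client.submit_tool_outputs_to_run(
        thread_id="thread_abc", run_id="run_123", tool_outputs=outputs
    )
```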
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_cancel_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_thread_and_run( - self, - *, - assistant_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort to use only
- the number of completion tokens specified, across multiple turns of the run. If the run
- exceeds the number of completion tokens
- specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
- info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether (and which) tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice. Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat. Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_thread_and_run(
- self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Creates a new agent thread and immediately starts a run using that new thread.
-
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_thread_and_run( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort to use only
- the number of completion tokens specified, across multiple turns of the run. If the run
- exceeds the number of completion tokens
- specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more
- info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether (and which) tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice. Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat. Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "thread": thread, - "tool_choice": tool_choice, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_and_run_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: - """Gets a single run step from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param step_id: Identifier of the run step. Required. - :type step_id: str - :return: RunStep. 
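A minimal sketch of ``create_thread_and_run`` as implemented above; the keyword values are arbitrary, and the ``thread_id``/``status`` fields read off the result assume the usual OpenAI-style run payload.

```python
# Hypothetical sketch: start a thread and a run in one call.
run = await agents_client.create_thread_and_run(
    assistant_id="asst_123",            # required keyword in the overload above
    instructions="Answer briefly.",     # overrides the agent's system instructions
    metadata={"purpose": "smoke-test"},
)
print(run["thread_id"], run["status"])  # assumed OpenAI-style fields
```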
The RunStep is compatible with MutableMapping - :rtype: ~azure.ai.project.models.RunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - - _request = build_agents_get_run_step_request( - thread_id=thread_id, - run_id=run_id, - step_id=step_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.RunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_run_steps( - self, - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfRunStep: - """Gets a list of run steps from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfRunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - - _request = build_agents_list_run_steps_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_files( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: - """Gets a list of previously uploaded files. - - :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is - None. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :return: FileListResponse. 
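The cursor parameters documented for ``list_run_steps`` (and ``list_runs``) suggest a paging loop like the following sketch; the ``data``/``has_more``/``last_id`` fields on the pageable list are assumed from the OpenAI-style contract, not confirmed by this patch.

```python
# Hypothetical pagination sketch over a run's steps.
page = await agents_client.list_run_steps(
    thread_id="thread_abc", run_id="run_123", order="asc", limit=100
)
steps = list(page["data"])
while page["has_more"]:  # assumed OpenAI-style paging fields
    page = await agents_client.list_run_steps(
        thread_id="thread_abc", run_id="run_123", order="asc",
        limit=100, after=page["last_id"],
    )
    steps.extend(page["data"])
```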
The FileListResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - - _request = build_agents_list_files_request( - purpose=purpose, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileListResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
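A hedged sketch of filtering uploaded files by purpose, continuing the placeholder client above; the ``data`` attribute on ``FileListResponse`` is an assumption.

    files = await project_client.agents.list_files(purpose="assistants")
    for f in files.data:  # ``data`` is assumed to hold the OpenAIFile items
        print(f.id, f.filename)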
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file( - self, - body: JSON = _Unset, - *, - file: FileType = _Unset, - purpose: Union[str, _models.FilePurpose] = _Unset, - filename: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Is one of the following types: JSON. Required. - :type body: JSON - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file is _Unset: - raise TypeError("missing required argument: file") - if purpose is _Unset: - raise TypeError("missing required argument: purpose") - body = {"file": file, "filename": filename, "purpose": purpose} - body = {k: v for k, v in body.items() if v is not None} - _body = body.as_dict() if isinstance(body, _model_base.Model) else body - _file_fields: List[str] = ["file"] - _data_fields: List[str] = ["purpose", "filename"] - _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) - - _request = build_agents_upload_file_request( - api_version=self._config.api_version, - files=_files, - data=_data, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized =
response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: - """Delete a previously uploaded file. - - :param file_id: The ID of the file to delete. Required. - :type file_id: str - :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: - """Returns information about a specific file. Does not retrieve file content. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: OpenAIFile. 
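A sketch of the multipart upload path (editor's addition): when no ``body`` is given, the method builds the form from ``file``/``purpose``/``filename`` as shown above. The local path is hypothetical, and the purpose value comes from the documented known values.

    with open("notes.txt", "rb") as fh:  # hypothetical local file
        uploaded = await project_client.agents.upload_file(
            file=fh, purpose="assistants", filename="notes.txt"
        )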
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - _request = build_agents_get_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: - """Retrieves the raw content of a previously uploaded file. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: FileContentResponse.
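Continuing the sketch, ``get_file`` returns metadata only; the field names on ``OpenAIFile`` are assumed from the OpenAI file object shape.

    info = await project_client.agents.get_file(file_id=uploaded.id)
    print(info.filename, info.purpose)  # assumed OpenAIFile fields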
The FileContentResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileContentResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) - - _request = build_agents_get_file_content_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileContentResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_stores( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStore: - """Returns a list of vector stores. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. 
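Because every method pops a ``stream`` kwarg (see ``_stream`` above) and returns ``response.iter_bytes()`` when it is set, callers can ask for the raw async byte iterator instead of the deserialized model. A hedged sketch for downloading file content this way:

    raw = await project_client.agents.get_file_content(file_id=uploaded.id, stream=True)
    content = bytearray()
    async for chunk in raw:  # response.iter_bytes() yields bytes chunks
        content.extend(chunk)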
- :paramtype before: str - :return: OpenAIPageableListOfVectorStore. The OpenAIPageableListOfVectorStore is compatible - with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_stores_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. 
Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore.
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: - """Returns the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStore. 
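A minimal creation sketch (not part of the patch) using only the keyword form documented above; ``project_client`` and the metadata values remain placeholders.

    store = await project_client.agents.create_vector_store(
        name="docs-store",
        file_ids=[uploaded.id],
        metadata={"team": "search"},  # up to 16 key/value pairs per the docstring
    )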
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def modify_vector_store( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def modify_vector_store( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None.
- :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def modify_vector_store( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def modify_vector_store( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore.
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"expires_after": expires_after, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_modify_vector_store_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: - """Deletes the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStoreDeletionStatus. 
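A sketch pairing ``modify_vector_store`` with ``delete_vector_store`` (editor's addition); the ``id`` and ``deleted`` fields are assumed from the OpenAI vector-store shapes.

    store = await project_client.agents.modify_vector_store(store.id, name="docs-store-v2")
    status = await project_client.agents.delete_vector_store(store.id)
    assert status.deleted  # assumed VectorStoreDeletionStatus field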
The VectorStoreDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_store_files( - self, - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_files_request( - vector_store_id=vector_store_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
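Filtering attached files by ingestion status, using the ``filter`` values documented above; the ``data`` attribute on the pageable model is an assumption.

    done = await project_client.agents.list_vector_store_files(
        vector_store_id=store.id, filter="completed", limit=50
    )
    for vs_file in done.data:  # assumed item list on the pageable model
        print(vs_file.id)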
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file( - self, - vector_store_id: str, - *, - file_id: str, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_id: str = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile.
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: - """Retrieves a vector store file. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFile. 
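Attaching a single file and re-reading it (editor's sketch; the ``status`` field is assumed to take the filter values documented earlier):

    vs_file = await project_client.agents.create_vector_store_file(
        vector_store_id=store.id, file_id=uploaded.id
    )
    vs_file = await project_client.agents.get_vector_store_file(store.id, vs_file.id)
    print(vs_file.status)  # assumed field, e.g. "in_progress" or "completed"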
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete_vector_store_file( - self, vector_store_id: str, file_id: str, **kwargs: Any - ) -> _models.VectorStoreFileDeletionStatus: - """Delete a vector store file. This will remove the file from the vector store but the file itself - will not be deleted. - To delete the file, use the delete file endpoint. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch( - self, - vector_store_id: str, - *, - file_ids: List[str], - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
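As the docstring above notes, ``delete_vector_store_file`` only detaches the file from the vector store; removing the underlying file takes a second call.

    await project_client.agents.delete_vector_store_file(store.id, vs_file.id)
    await project_client.agents.delete_file(uploaded.id)  # deletes the file itself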
- :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_vector_store_file_batch( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_vector_store_file_batch( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: List[str] = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch.
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_batch_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Retrieve a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
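A polling sketch for file batches (not part of the patch); the ``status`` values are taken from the ``VectorStoreFileStatusFilter`` values documented earlier, and ``asyncio.sleep`` keeps the loop cooperative.

    import asyncio

    batch = await project_client.agents.create_vector_store_file_batch(
        vector_store_id=store.id, file_ids=[uploaded.id]
    )
    while batch.status == "in_progress":  # assumed status field on the batch model
        await asyncio.sleep(1)
        batch = await project_client.agents.get_vector_store_file_batch(store.id, batch.id)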
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def cancel_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch - as soon as possible. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_cancel_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list_vector_store_file_batch_files( - self, - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files in a batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_file_batch_files_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.aio.AIProjectClient`'s - :attr:`connections` attribute. 
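Taken together, the batch operations above (create, get, list files) support a create-then-poll workflow. Below is a minimal sketch of that flow, assuming an authenticated async AIProjectClient named `project_client`; the `from_connection_string` constructor, the placeholder IDs, and the paging fields `data`, `has_more` and `last_id` are assumptions for illustration, not taken from this patch.

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.ai.project.aio import AIProjectClient


    async def main() -> None:
        async with DefaultAzureCredential() as credential:
            # The constructor and the placeholder values below are assumptions for illustration.
            async with AIProjectClient.from_connection_string(
                conn_str="<project-connection-string>", credential=credential
            ) as project_client:
                batch = await project_client.agents.create_vector_store_file_batch(
                    vector_store_id="<vector-store-id>", file_ids=["<file-id-1>", "<file-id-2>"]
                )
                # Poll until processing finishes; "in_progress" is one of the status values listed above.
                while batch.status == "in_progress":
                    await asyncio.sleep(1)
                    batch = await project_client.agents.get_vector_store_file_batch(
                        vector_store_id="<vector-store-id>", batch_id=batch.id
                    )
                # Page forward through the batch's files with the `after` cursor.
                page = await project_client.agents.list_vector_store_file_batch_files(
                    vector_store_id="<vector-store-id>", batch_id=batch.id, limit=20
                )
                while True:
                    for item in page.data:
                        print(item.id, item.status)
                    if not page.has_more:
                        break
                    page = await project_client.agents.list_vector_store_file_batch_files(
                        vector_store_id="<vector-store-id>", batch_id=batch.id, limit=20, after=page.last_id
                    )


    asyncio.run(main())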
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def _list( - self, - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any - ) -> _models._models.ConnectionsListResponse: - """List the details of all the connections (not including their credentials). - - :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. - :paramtype category: str or ~azure.ai.project.models.ConnectionType - :keyword include_all: Indicates whether to list datastores. Service default: do not list - datastores. Default value is None. - :paramtype include_all: bool - :keyword target: Target of the workspace connection. Default value is None. - :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - - _request = build_connections_list_request( - category=category, - include_all=include_all, - target=target, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def _get(self, connection_name: str, **kwargs: Any) -> 
_models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, without credentials. - - :param connection_name: Connection Name. Required. - :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - _request = build_connections_get_request( - connection_name=connection_name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def _list_secrets( - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - async def _list_secrets( - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - async def _list_secrets( - self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - - @distributed_trace_async - async def _list_secrets( - self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credentials (if available). - - :param connection_name: Connection Name. Required. - :type connection_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword ignored: The body is ignored. 
TODO: Can we remove this?. Required. - :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - if body is _Unset: - if ignored is _Unset: - raise TypeError("missing required argument: ignored") - body = {"ignored": ignored} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_connections_list_secrets_request( - connection_name=connection_name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class EvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.aio.AIProjectClient`'s - :attr:`evaluations` attribute. 
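A short sketch of the read and list operations this class exposes, reusing the `project_client` from the earlier sketch; the evaluation ID is a placeholder:

    async def show_evaluations(project_client) -> None:
        # list() returns an AsyncItemPaged; consume it with `async for`.
        async for evaluation in project_client.evaluations.list(top=10):
            print(evaluation)
        # Re-read a single evaluation by its identifier.
        evaluation = await project_client.evaluations.get("<evaluation-id>")
        print(evaluation)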
- """ - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: ~azure.ai.project.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. 
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type evaluation: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluation, (IOBase, bytes)): - _content = evaluation - else: - _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> AsyncIterable["_models.Evaluation"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.project.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = 
pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @overload - async def update( - self, - id: str, - resource: _models.Evaluation, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.project.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def update( - self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def update( - self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type resource: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. 
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_update_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Resource read operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - name=name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - async def create_or_replace_schedule( - self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.project.models.EvaluationSchedule - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_replace_schedule( - self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. 
- :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_replace_schedule( - self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_replace_schedule( - self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Is one of the following types: EvaluationSchedule, - JSON, IO[bytes] Required. - :type resource: ~azure.ai.project.models.EvaluationSchedule or JSON or IO[bytes] - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_or_replace_schedule_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_schedule( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> AsyncIterable["_models.EvaluationSchedule"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. 
- :paramtype skip: int - :return: An iterator like instance of EvaluationSchedule - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.project.models.EvaluationSchedule] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - async def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @distributed_trace_async - async def delete_schedule(self, name: str, **kwargs: Any) -> None: - """Resource delete operation template. 
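A sketch of walking the schedule list and removing one entry, again reusing `project_client`; the schedule name is a placeholder:

    async def prune_schedules(project_client) -> None:
        # list_schedule pages through EvaluationSchedule resources.
        async for schedule in project_client.evaluations.list_schedule(top=10):
            print(schedule.name)
        # delete_schedule returns None; failures surface as HttpResponseError.
        await project_client.evaluations.delete_schedule("<schedule-name>")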
- - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_evaluations_delete_schedule_request( - name=name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py deleted file mode 100644 index 51d92bfd1ee9..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/aio/operations/_patch.py +++ /dev/null @@ -1,1977 +0,0 @@ -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from ..._vendor import FileType -import io -import logging -import os -import time -from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload - -from azure.ai.project import _types -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ...models._patch import ConnectionProperties -from ...models._enums import AuthenticationType, ConnectionType, FilePurpose -from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse -from ... 
import models as _models -from azure.core.tracing.decorator_async import distributed_trace_async - -logger = logging.getLogger(__name__) - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - - -class InferenceOperations: - - def __init__(self, outer_instance): - self.outer_instance = outer_instance - - @distributed_trace_async - async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": - """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. - The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - - :return: An authenticated chat completions client - :rtype: ~azure.ai.inference.models.ChatCompletionsClient - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connection = await self.outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No serverless connection found") - - try: - from azure.ai.inference.aio import ChatCompletionsClient - except ModuleNotFoundError as _: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) - ) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" - ) - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. - logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" - ) - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace_async - async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": - """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment. - The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. 
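As a usage sketch for the chat-completions path implemented above, reusing `project_client`; the `UserMessage` helper comes from the azure-ai-inference package, and treating `inference` as the attribute name on the client is an assumption here:

    from azure.ai.inference.models import UserMessage


    async def ask(project_client) -> None:
        # Requires the azure-ai-inference and aiohttp packages, as noted above.
        chat_client = await project_client.inference.get_chat_completions_client()
        response = await chat_client.complete(
            messages=[UserMessage(content="How many feet are in a mile?")]
        )
        print(response.choices[0].message.content)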
-
-        :return: An authenticated embeddings client
-        :rtype: ~azure.ai.inference.models.EmbeddingsClient
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = await self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
-
-        try:
-            from azure.ai.inference.aio import EmbeddingsClient
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError(
-                "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
-            )
-
-        if connection.authentication_type == AuthenticationType.API_KEY:
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
-            )
-            from azure.core.credentials import AzureKeyCredential
-
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
-        elif connection.authentication_type == AuthenticationType.AAD:
-            # MaaS models do not yet support EntraID auth
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
-            )
-            client = EmbeddingsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
-            )
-        elif connection.authentication_type == AuthenticationType.SAS:
-            # TODO - Not yet supported by the service. Expected 9/27.
-            logger.debug(
-                "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
-            )
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
-        else:
-            raise ValueError("Unknown authentication type")
-
-        return client
-
-    @distributed_trace_async
-    async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
-        """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default
-        Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
-
-        :return: An authenticated AsyncAzureOpenAI client
-        :rtype: ~openai.AsyncAzureOpenAI
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-        kwargs.setdefault("merge_span", True)
-        connection = await self.outer_instance.connections.get_default(
-            connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No Azure OpenAI connection found.")
-
-        try:
-            from openai import AsyncAzureOpenAI
-        except ModuleNotFoundError as _:
-            raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'")
-
- @distributed_trace_async
- async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
- """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default
- Azure OpenAI connection. The package `openai` must be installed prior to calling this method.
-
- :return: An authenticated AsyncAzureOpenAI client
- :rtype: ~openai.AsyncAzureOpenAI
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- connection = await self.outer_instance.connections.get_default(
- connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs
- )
- if not connection:
- raise ValueError("No Azure OpenAI connection found.")
-
- try:
- from openai import AsyncAzureOpenAI
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") from e
-
- # Pick latest GA version from the "Data plane - Inference" row in the table
- # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
- AZURE_OPENAI_API_VERSION = "2024-06-01"
-
- if connection.authentication_type == AuthenticationType.API_KEY:
- logger.debug(
- "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
- )
- client = AsyncAzureOpenAI(
- api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
- )
- elif connection.authentication_type == AuthenticationType.AAD:
- logger.debug(
- "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication"
- )
- try:
- from azure.identity import get_bearer_token_provider
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError(
- "azure.identity package not installed. Please install it using 'pip install azure-identity'"
- ) from e
- client = AsyncAzureOpenAI(
- # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider
- azure_ad_token_provider=get_bearer_token_provider(
- connection.token_credential, "https://cognitiveservices.azure.com/.default"
- ),
- azure_endpoint=connection.endpoint_url,
- api_version=AZURE_OPENAI_API_VERSION,
- )
- elif connection.authentication_type == AuthenticationType.SAS:
- logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
- try:
- from azure.identity import get_bearer_token_provider
- except ModuleNotFoundError as e:
- raise ModuleNotFoundError(
- "azure.identity package not installed. Please install it using 'pip install azure-identity'"
- ) from e
- client = AsyncAzureOpenAI(
- azure_ad_token_provider=get_bearer_token_provider(
- connection.token_credential, "https://cognitiveservices.azure.com/.default"
- ),
- azure_endpoint=connection.endpoint_url,
- api_version=AZURE_OPENAI_API_VERSION,
- )
- else:
- raise ValueError("Unknown authentication type")
-
- return client
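-
- # Editor's note: a minimal usage sketch (not part of this module); "gpt-4o" below is an
- # assumed deployment name, not something this module provides:
- #
- # async def _demo_azure_openai(project_client) -> None:
- #     aoai_client = await project_client.inference.get_azure_openai_client()
- #     response = await aoai_client.chat.completions.create(
- #         model="gpt-4o", messages=[{"role": "user", "content": "Hello!"}]
- #     )
- #     print(response.choices[0].message.content)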
-
-class ConnectionsOperations(ConnectionsOperationsGenerated):
-
- @distributed_trace_async
- async def get_default(
- self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any
- ) -> ConnectionProperties:
- """Get the properties of the default connection of a certain connection type, with or without
- populating authentication credentials.
-
- :param connection_type: The connection type. Required.
- :type connection_type: ~azure.ai.project.models.ConnectionType
- :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
- :type with_credentials: bool
- :return: The connection properties, or None if no connection of the given type exists.
- :rtype: ~azure.ai.project.models.ConnectionProperties
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- if not connection_type:
- raise ValueError("You must specify a connection type")
- # Since there is no notion of default connection at the moment, list all connections in the category
- # and return the first one
- connection_properties_list = await self.list(connection_type=connection_type, **kwargs)
- if len(connection_properties_list) > 0:
- if with_credentials:
- return await self.get(
- connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs
- )
- else:
- return connection_properties_list[0]
- else:
- return None
-
- @distributed_trace_async
- async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties:
- """Get the properties of a single connection, given its connection name, with or without
- populating authentication credentials.
-
- :param connection_name: Connection Name. Required.
- :type connection_name: str
- :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
- :type with_credentials: bool
- :return: The connection properties
- :rtype: ~azure.ai.project.models.ConnectionProperties
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- if not connection_name:
- raise ValueError("Connection name cannot be empty")
- if with_credentials:
- connection: ConnectionsListSecretsResponse = await self._list_secrets(
- connection_name=connection_name, ignored="ignore", **kwargs
- )
- if connection.properties.auth_type == AuthenticationType.AAD:
- return ConnectionProperties(connection=connection, token_credential=self._config.credential)
- elif connection.properties.auth_type == AuthenticationType.SAS:
- from ...models._patch import SASTokenCredential
-
- token_credential = SASTokenCredential(
- sas_token=connection.properties.credentials.sas,
- credential=self._config.credential,
- subscription_id=self._config.subscription_id,
- resource_group_name=self._config.resource_group_name,
- project_name=self._config.project_name,
- connection_name=connection_name,
- )
- return ConnectionProperties(connection=connection, token_credential=token_credential)
-
- return ConnectionProperties(connection=connection)
- else:
- return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs))
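-
- # Editor's note: a minimal usage sketch (not part of this module), assuming the same
- # hypothetical `project_client` as above:
- #
- # async def _demo_default_connection(project_client) -> None:
- #     connection = await project_client.connections.get_default(
- #         connection_type=ConnectionType.SERVERLESS, with_credentials=True
- #     )
- #     if connection:
- #         print(connection.name, connection.authentication_type)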
-
- @distributed_trace_async
- async def list(
- self, *, connection_type: ConnectionType | None = None, **kwargs: Any
- ) -> Iterable[ConnectionProperties]:
- """List the properties of all connections, or of all connections of a certain connection type.
-
- :param connection_type: The connection type. Optional. If provided, this method lists connections of this type.
- If not provided, all connections are listed.
- :type connection_type: ~azure.ai.project.models.ConnectionType
- :return: A list of connection properties
- :rtype: Iterable[~azure.ai.project.models.ConnectionProperties]
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- kwargs.setdefault("merge_span", True)
- connections_list: ConnectionsListResponse = await self._list(
- include_all=True, category=connection_type, **kwargs
- )
-
- # Iterate to create the simplified result property
- connection_properties_list: List[ConnectionProperties] = []
- for connection in connections_list.value:
- connection_properties_list.append(ConnectionProperties(connection=connection))
-
- return connection_properties_list
-
-
-class AgentsOperations(AgentsOperationsGenerated):
-
- @overload
- async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
- """Creates a new agent.
-
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: Agent. The Agent is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.Agent
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_agent(
- self,
- *,
- model: str,
- content_type: str = "application/json",
- name: Optional[str] = None,
- description: Optional[str] = None,
- instructions: Optional[str] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- tool_resources: Optional[_models.ToolResources] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- **kwargs: Any,
- ) -> _models.Agent:
- """Creates a new agent.
-
- :keyword model: The ID of the model to use. Required.
- :paramtype model: str
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword name: The name of the new agent. Default value is None.
- :paramtype name: str
- :keyword description: The description of the new agent. Default value is None.
- :paramtype description: str
- :keyword instructions: The system instructions for the new agent to use. Default value is None.
- :paramtype instructions: str
- :keyword tools: The collection of tools to enable for the new agent. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword tool_resources: A set of resources that are used by the agent's tools. The resources
- are specific to the type of tool. For example, the ``code_interpreter``
- tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector
- store IDs. Default value is None.
- :paramtype tool_resources: ~azure.ai.project.models.ToolResources
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output more random,
- while lower values like 0.2 will make it more focused and deterministic. Default value is
- None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass.
- So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword response_format: The response format of the tool calls used by this agent. Is one of
- the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: Agent. The Agent is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.Agent
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_agent(
- self,
- *,
- model: str,
- content_type: str = "application/json",
- name: Optional[str] = None,
- description: Optional[str] = None,
- instructions: Optional[str] = None,
- toolset: Optional[_models.AsyncToolSet] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- **kwargs: Any,
- ) -> _models.Agent:
- """Creates a new agent.
-
- :keyword model: The ID of the model to use. Required.
- :paramtype model: str
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword name: The name of the new agent. Default value is None.
- :paramtype name: str
- :keyword description: The description of the new agent. Default value is None.
- :paramtype description: str
- :keyword instructions: The system instructions for the new agent to use. Default value is None.
- :paramtype instructions: str
- :keyword toolset: The collection of tools and resources (alternative to `tools` and `tool_resources`
- and adds automatic execution logic for functions). Default value is None.
- :paramtype toolset: ~azure.ai.project.models.AsyncToolSet
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output more random,
- while lower values like 0.2 will make it more focused and deterministic. Default value is
- None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model considers the results of the tokens with top_p probability mass.
- So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword response_format: The response format of the tool calls used by this agent. Is one of
- the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format.
Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_agent( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. - :return: An Agent object. - :raises: HttpResponseError for HTTP errors. - """ - if body is not _Unset: - if isinstance(body, io.IOBase): - return await super().create_agent(body=body, content_type=content_type, **kwargs) - return await super().create_agent(body=body, **kwargs) - - if toolset is not None: - self._toolset = toolset - tools = toolset.definitions - tool_resources = toolset.resources - - return await super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def get_toolset(self) -> Optional[_models.AsyncToolSet]: - """ - Get the toolset for the agent. - - :return: The toolset for the agent. If not set, returns None. 
- :rtype: ~azure.ai.project.models.AsyncToolSet - """ - if hasattr(self, "_toolset"): - return self._toolset - return None - - @overload - async def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. 
Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_run(
- self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Creates a new run for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. 
The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict): # Handle overload with JSON body.
- content_type = kwargs.get("content_type", "application/json")
- response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
-
- elif assistant_id is not _Unset: # Handle overload with keyword arguments.
- response = super().create_run(
- thread_id,
- assistant_id=assistant_id,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=tools,
- stream_parameter=False,
- stream=False,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- metadata=metadata,
- **kwargs,
- )
-
- elif isinstance(body, io.IOBase): # Handle overload with binary body.
- content_type = kwargs.get("content_type", "application/json")
- response = super().create_run(thread_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # Streaming is disabled for this method, so awaiting the response yields the ThreadRun
- return await response
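-
- # Editor's note: a minimal usage sketch (not part of this module) for create_run,
- # polling the run by hand; `agent` and `thread` are assumed to exist already:
- #
- # import asyncio
- #
- # async def _demo_create_run(project_client, agent, thread) -> None:
- #     run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
- #     while run.status in ["queued", "in_progress", "requires_action"]:
- #         await asyncio.sleep(1)
- #         run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
- #     print(run.status)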
-
- @distributed_trace_async
- async def create_and_process_run(
- self,
- thread_id: str,
- assistant_id: str,
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- sleep_interval: int = 1,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Creates a new run for an agent thread and processes the run.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword model: The overridden model name that the agent should use to run the thread.
- Default value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run
- the thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or
- ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or
- ~azure.ai.project.models.AgentsApiResponseFormatMode or
- ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: The time in seconds to wait between polling the service for run status.
- Default value is 1.
- :paramtype sleep_interval: int
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- import asyncio # local import; used for non-blocking polling below
-
- # Create and initiate the run with additional parameters
- run = await self.create_run(
- thread_id=thread_id,
- assistant_id=assistant_id,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=tools,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- metadata=metadata,
- **kwargs,
- )
-
- # Monitor and process the run status; use an async sleep so the event loop is not blocked
- while run.status in ["queued", "in_progress", "requires_action"]:
- await asyncio.sleep(sleep_interval)
- run = await self.get_run(thread_id=thread_id, run_id=run.id)
-
- if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logger.warning("No tool calls provided - cancelling run")
- await self.cancel_run(thread_id=thread_id, run_id=run.id)
- break
-
- toolset = self.get_toolset()
- if toolset:
- tool_outputs = await toolset.execute_tool_calls(tool_calls)
- else:
- raise ValueError("Toolset is not available in the client.")
-
- logger.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- await self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
-
- logger.info("Current run status: %s", run.status)
-
- return run
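-
- # Editor's note: a minimal usage sketch (not part of this module); create_and_process_run
- # drives the polling loop above and executes function tools from the configured toolset:
- #
- # async def _demo_process_run(project_client, agent, thread) -> None:
- #     run = await project_client.agents.create_and_process_run(
- #         thread_id=thread.id, assistant_id=agent.id
- #     )
- #     print(run.status)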
-
- @overload
- async def create_stream(
- self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AsyncAgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_stream(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- content_type: str = "application/json",
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AsyncAgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AsyncAgentRunStream:
- """Creates a new stream for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def create_stream(
- self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AsyncAgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
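-
- # Editor's note: a minimal usage sketch (not part of this module) for create_stream,
- # consuming the event stream with an async context manager:
- #
- # async def _demo_stream(project_client, agent, thread) -> None:
- #     async with await project_client.agents.create_stream(
- #         thread_id=thread.id, assistant_id=agent.id
- #     ) as stream:
- #         await stream.until_done()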
-
- @distributed_trace_async
- async def create_stream(
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- assistant_id: str = _Unset,
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AsyncAgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AsyncAgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - @overload - async def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AsyncAgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def submit_tool_outputs_to_run(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def submit_tool_outputs_to_run(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AsyncAgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler
- :param kwargs: Additional parameters.
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # Streaming is disabled for this method, so awaiting the response yields the ThreadRun
- return await response
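-
- # Editor's note: a minimal usage sketch (not part of this module); answering a
- # `requires_action` run by hand, where `outputs_from_my_functions` stands in for
- # application logic that produced the ToolOutput list:
- #
- # async def _demo_submit_outputs(project_client, thread, run, outputs_from_my_functions) -> None:
- #     if run.status == "requires_action":
- #         await project_client.agents.submit_tool_outputs_to_run(
- #             thread_id=thread.id, run_id=run.id, tool_outputs=outputs_from_my_functions
- #         )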
-
- @overload
- async def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AsyncAgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream, terminating when the Run
- enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- *,
- tool_outputs: List[_models.ToolOutput],
- content_type: str = "application/json",
- event_handler: Optional[_models.AsyncAgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AsyncAgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream, terminating when the Run
- enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AsyncAgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- async def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AsyncAgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AsyncAgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace_async
- async def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AsyncAgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AsyncAgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream, terminating when the Run
- enters a terminal state with a ``data: [DONE]`` message. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or a IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :param event_handler: The event handler to use for processing events during the run. - :param kwargs: Additional parameters. - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.project.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - elif tool_outputs is not _Unset: - response = super().submit_tool_outputs_to_run( - thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs - ) - - elif isinstance(body, io.IOBase): - content_type = kwargs.get("content_type", "application/json") - response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # Cast the response to Iterator[bytes] for type correctness - response_iterator: AsyncIterator[bytes] = cast(AsyncIterator[bytes], await response) - - return _models.AsyncAgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - async def _handle_submit_tool_outputs( - self, run: _models.ThreadRun, event_handler: Optional[_models.AsyncAgentEventHandler] = None - ) -> None: - if isinstance(run.required_action, _models.SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - logger.debug("No tool calls to execute.") - return - - toolset = self.get_toolset() - if toolset: - tool_outputs = await toolset.execute_tool_calls(tool_calls) - else: - logger.warning("Toolset is not available in the client.") - return - - logger.info(f"Tool outputs: {tool_outputs}") - if tool_outputs: - async with await self.submit_tool_outputs_to_stream( - thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler - ) as stream: - await stream.until_done() - - @overload - async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. - :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param file_path: Required. 
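The dispatch above accepts either a raw ``body`` or an explicit ``tool_outputs`` list. A minimal usage sketch of the streaming path, mirroring the ``_handle_submit_tool_outputs`` pattern in this file; ``agents_client`` stands in for a configured async client and the tool output value is a placeholder:

.. code-block:: python

    from azure.ai.project.models import SubmitToolOutputsAction, ToolOutput

    async def answer_tool_calls(agents_client, run) -> None:
        # Only runs parked in 'requires_action' with a SubmitToolOutputsAction carry tool calls.
        if not isinstance(run.required_action, SubmitToolOutputsAction):
            return
        outputs = [
            ToolOutput(tool_call_id=tool_call.id, output="42")  # placeholder result per call
            for tool_call in run.required_action.submit_tool_outputs.tool_calls
        ]
        # Streaming variant: events arrive until the run reaches a terminal state ("data: [DONE]").
        async with await agents_client.submit_tool_outputs_to_stream(
            thread_id=run.thread_id, run_id=run.id, tool_outputs=outputs
        ) as stream:
            await stream.until_done()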
- :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - return await super().upload_file(body=body, **kwargs) - - if isinstance(purpose, FilePurpose): - purpose = purpose.value - - if file is not None and purpose is not None: - return await super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - - if file_path is not None and purpose is not None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"The file path provided does not exist: {file_path}") - - try: - with open(file_path, "rb") as f: - content = f.read() - - # Determine filename and create correct FileType - base_filename = filename or os.path.basename(file_path) - file_content: FileType = (base_filename, content) - - return await super().upload_file(file=file_content, purpose=purpose, **kwargs) - except IOError as e: - raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") - - raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") - - @overload - async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, - *, - file: FileType, - purpose: Union[str, _models.FilePurpose], - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: Required. 
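``upload_file`` normalizes three call shapes: a raw JSON body, an in-memory ``(filename, content)`` tuple, or a local path that the override reads itself. A hedged sketch of the path-based form; the client variable and path are placeholders, not part of this changeset:

.. code-block:: python

    from azure.ai.project.models import FilePurpose

    async def upload_example(agents_client):
        # Path-based form: the override reads the bytes and builds a (filename, content)
        # tuple before delegating to the generated upload operation.
        uploaded = await agents_client.upload_file(
            file_path="./data/notes.txt",  # placeholder path
            purpose=FilePurpose.AGENTS,    # sent on the wire as "assistants"
        )
        print(uploaded.id, uploaded.status)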
- :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: Default value is None. - :paramtype filename: str - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def upload_file_and_poll( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param file_path: Required. - :type file_path: str - :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def upload_file_and_poll( - self, - body: Optional[JSON] = None, - *, - file: Optional[FileType] = None, - file_path: Optional[str] = None, - purpose: Union[str, _models.FilePurpose, None] = None, - filename: Optional[str] = None, - sleep_interval: float = 1, - **kwargs: Any, - ) -> _models.OpenAIFile: - """ - Uploads a file for use by other operations, delegating to the generated operations. - - :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", - "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value - is 1. - :paramtype sleep_interval: float - :param kwargs: Additional parameters. - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :raises FileNotFoundError: If the file_path is invalid. - :raises IOError: If there are issues with reading the file. - :raises: HttpResponseError for HTTP errors. - """ - if body is not None: - uploaded_file = await self.upload_file(body=body, **kwargs) - elif file is not None and purpose is not None: - uploaded_file = await self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs) - elif file_path is not None and purpose is not None: - uploaded_file = await self.upload_file(file_path=file_path, purpose=purpose, **kwargs) - else: - raise ValueError( - "Invalid parameters for upload_file_and_poll. Please provide either 'body', " - "or both 'file' and 'purpose', or both 'file_path' and 'purpose'." 
-        )
-
-        while uploaded_file.status in ["uploaded", "pending", "running"]:
-            # time.sleep() would block the event loop inside this coroutine; use
-            # asyncio.sleep() instead (assumes "import asyncio" among the module imports).
-            await asyncio.sleep(sleep_interval)
-            uploaded_file = await self.get_file(uploaded_file.id)
-
-        return uploaded_file
-
-    @overload
-    async def create_vector_store_and_poll(
-        self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_and_poll(
-        self,
-        *,
-        content_type: str = "application/json",
-        file_ids: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
-         ``file_search`` that can access files. Default value is None.
-        :paramtype file_ids: list[str]
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_and_poll(
-        self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def create_vector_store_and_poll(
-        self,
-        body: Union[JSON, IO[bytes], None] = None,
-        *,
-        content_type: str = "application/json",
-        file_ids: Optional[List[str]] = None,
-        name: Optional[str] = None,
-        expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        metadata: Optional[Dict[str, str]] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStore:
-        """Creates a vector store and polls until it is ready.
-
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
-         ``file_search`` that can access files. Default value is None.
-        :paramtype file_ids: list[str]
-        :keyword name: The name of the vector store. Default value is None.
-        :paramtype name: str
-        :keyword expires_after: Details on when this vector store expires. Default value is None.
-        :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
-        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
-         for storing additional information about that object in a structured format. Keys may be up to
-         64 characters in length and values may be up to 512 characters in length. Default value is
-         None.
-        :paramtype metadata: dict[str, str]
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStore. The VectorStore is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStore
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is not None:
-            vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs)
-        elif file_ids is not None or (name is not None and expires_after is not None):
-            vector_store = await self.create_vector_store(
-                content_type=content_type,
-                file_ids=file_ids,
-                name=name,
-                expires_after=expires_after,
-                chunking_strategy=chunking_strategy,
-                metadata=metadata,
-                **kwargs,
-            )
-        else:
-            raise ValueError(
-                "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
-                "'file_ids', or 'name' and 'expires_after'."
-            )
-
-        while vector_store.status == "in_progress":
-            # Non-blocking wait between polls (see note in upload_file_and_poll above).
-            await asyncio.sleep(sleep_interval)
-            vector_store = await self.get_vector_store(vector_store.id)
-
-        return vector_store
-
-    @overload
-    async def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: JSON,
-        *,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Creates a vector store file batch and polls until it is ready.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: JSON
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        *,
-        file_ids: List[str],
-        content_type: str = "application/json",
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Creates a vector store file batch and polls until it is ready.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :keyword file_ids: List of file identifiers. Required.
-        :paramtype file_ids: list[str]
-        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @overload
-    async def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: IO[bytes],
-        *,
-        content_type: str = "application/json",
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Creates a vector store file batch and polls until it is ready.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Required.
-        :type body: IO[bytes]
-        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
-         Default value is "application/json".
-        :paramtype content_type: str
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-    @distributed_trace_async
-    async def create_vector_store_file_batch_and_poll(
-        self,
-        vector_store_id: str,
-        body: Optional[Union[JSON, IO[bytes]]] = None,
-        *,
-        file_ids: List[str] = _Unset,
-        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
-        sleep_interval: float = 1,
-        **kwargs: Any,
-    ) -> _models.VectorStoreFileBatch:
-        """Creates a vector store file batch and polls until it is ready.
-
-        :param vector_store_id: Identifier of the vector store. Required.
-        :type vector_store_id: str
-        :param body: Is either a JSON type or an IO[bytes] type. Required.
-        :type body: JSON or IO[bytes]
-        :keyword file_ids: List of file identifiers. Required.
-        :paramtype file_ids: list[str]
-        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
-         use the auto strategy. Default value is None.
-        :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
-        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
-         is 1.
-        :paramtype sleep_interval: float
-        :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
-        :rtype: ~azure.ai.project.models.VectorStoreFileBatch
-        :raises ~azure.core.exceptions.HttpResponseError:
-        """
-
-        if body is None:
-            vector_store_file_batch = await super().create_vector_store_file_batch(
-                vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
-            )
-        else:
-            content_type = kwargs.get("content_type", "application/json")
-            vector_store_file_batch = await super().create_vector_store_file_batch(
-                body=body, content_type=content_type, **kwargs
-            )
-
-        while vector_store_file_batch.status == "in_progress":
-            # Non-blocking wait between polls (see note in upload_file_and_poll above).
-            await asyncio.sleep(sleep_interval)
-            vector_store_file_batch = await super().get_vector_store_file_batch(
-                vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
-            )
-
-        return vector_store_file_batch
-
-
-__all__: List[str] = [
-    "AgentsOperations",
-    "ConnectionsOperations",
-    "InferenceOperations",
-]  # Add all objects you want publicly available to users at this package level
-
-
-def patch_sdk():
-    """Do not remove from this file.
-
-    `patch_sdk` is a last resort escape hatch that allows you to do customizations
-    you can't accomplish using the techniques described in
-    https://aka.ms/azsdk/python/dpcodegen/python/customize
-    """
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py
deleted file mode 100644
index f6ed04e4637b..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/project/models/__init__.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
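Both polling helpers above share one pattern: create, then re-fetch until the status leaves ``in_progress``. A combined usage sketch; the client, file IDs, and store name are placeholders, not part of this changeset:

.. code-block:: python

    async def build_vector_store(agents_client, file_ids):
        # Create the store and wait (cooperatively) until processing finishes.
        vector_store = await agents_client.create_vector_store_and_poll(
            file_ids=file_ids, name="sample-store", sleep_interval=2
        )
        # Attach more files later in one batch; the helper polls the batch the same way.
        batch = await agents_client.create_vector_store_file_batch_and_poll(
            vector_store_id=vector_store.id, file_ids=file_ids
        )
        # Both loops exit on a terminal status such as "completed", "failed", or "cancelled".
        print(vector_store.status, batch.status)
        return vector_store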
-# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - - -from ._models import ( # type: ignore - Agent, - AgentDeletionStatus, - AgentThread, - AgentThreadCreationOptions, - AgentsApiResponseFormat, - AgentsNamedToolChoice, - ApplicationInsightsConfiguration, - AzureAISearchResource, - AzureAISearchToolDefinition, - BingGroundingToolDefinition, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - ConnectionListResource, - ConnectionResource, - CronTrigger, - Dataset, - Evaluation, - EvaluationSchedule, - EvaluatorConfiguration, - FileContentResponse, - FileDeletionStatus, - FileListResponse, - FileSearchToolDefinition, - FileSearchToolDefinitionDetails, - FileSearchToolResource, - FunctionDefinition, - FunctionName, - FunctionToolDefinition, - IndexResource, - InputData, - MessageAttachment, - MessageContent, - MessageDelta, - MessageDeltaChunk, - MessageDeltaContent, - MessageDeltaImageFileContent, - MessageDeltaImageFileContentObject, - MessageDeltaTextAnnotation, - MessageDeltaTextContent, - MessageDeltaTextContentObject, - MessageDeltaTextFileCitationAnnotation, - MessageDeltaTextFileCitationAnnotationObject, - MessageDeltaTextFilePathAnnotation, - MessageDeltaTextFilePathAnnotationObject, - MessageImageFileContent, - MessageImageFileDetails, - MessageIncompleteDetails, - MessageTextAnnotation, - MessageTextContent, - MessageTextDetails, - MessageTextFileCitationAnnotation, - MessageTextFileCitationDetails, - MessageTextFilePathAnnotation, - MessageTextFilePathDetails, - MicrosoftFabricToolDefinition, - OpenAIFile, - OpenAIPageableListOfAgent, - OpenAIPageableListOfRunStep, - OpenAIPageableListOfThreadMessage, - OpenAIPageableListOfThreadRun, - OpenAIPageableListOfVectorStore, - OpenAIPageableListOfVectorStoreFile, - RecurrenceSchedule, - RecurrenceTrigger, - RequiredAction, - RequiredFunctionToolCall, - RequiredFunctionToolCallDetails, - RequiredToolCall, - RunCompletionUsage, - RunError, - RunStep, - RunStepAzureAISearchToolCall, - RunStepBingGroundingToolCall, - RunStepCodeInterpreterImageOutput, - RunStepCodeInterpreterImageReference, - RunStepCodeInterpreterLogOutput, - RunStepCodeInterpreterToolCall, - RunStepCodeInterpreterToolCallDetails, - RunStepCodeInterpreterToolCallOutput, - RunStepCompletionUsage, - RunStepDelta, - RunStepDeltaChunk, - RunStepDeltaCodeInterpreterDetailItemObject, - RunStepDeltaCodeInterpreterImageOutput, - RunStepDeltaCodeInterpreterImageOutputObject, - RunStepDeltaCodeInterpreterLogOutput, - RunStepDeltaCodeInterpreterOutput, - RunStepDeltaCodeInterpreterToolCall, - RunStepDeltaDetail, - RunStepDeltaFileSearchToolCall, - RunStepDeltaFunction, - RunStepDeltaFunctionToolCall, - RunStepDeltaMessageCreation, - RunStepDeltaMessageCreationObject, - RunStepDeltaToolCall, - RunStepDeltaToolCallObject, - RunStepDetails, - RunStepError, - RunStepFileSearchToolCall, - RunStepFunctionToolCall, - RunStepFunctionToolCallDetails, - RunStepMessageCreationDetails, - RunStepMessageCreationReference, - RunStepMicrosoftFabricToolCall, - RunStepSharepointToolCall, - RunStepToolCall, - RunStepToolCallDetails, - SamplingStrategy, - SharepointToolDefinition, - SubmitToolOutputsAction, - SubmitToolOutputsDetails, - SystemData, - ThreadDeletionStatus, - ThreadMessage, - ThreadMessageOptions, - ThreadRun, - ToolDefinition, - ToolOutput, - ToolResources, - Trigger, - 
TruncationObject, - UpdateCodeInterpreterToolResourceOptions, - UpdateFileSearchToolResourceOptions, - UpdateToolResourcesOptions, - VectorStore, - VectorStoreAutoChunkingStrategyRequest, - VectorStoreAutoChunkingStrategyResponse, - VectorStoreChunkingStrategyRequest, - VectorStoreChunkingStrategyResponse, - VectorStoreDeletionStatus, - VectorStoreExpirationPolicy, - VectorStoreFile, - VectorStoreFileBatch, - VectorStoreFileCount, - VectorStoreFileDeletionStatus, - VectorStoreFileError, - VectorStoreStaticChunkingStrategyOptions, - VectorStoreStaticChunkingStrategyRequest, - VectorStoreStaticChunkingStrategyResponse, -) - -from ._enums import ( # type: ignore - AgentStreamEvent, - AgentsApiResponseFormatMode, - AgentsApiToolChoiceOptionMode, - AgentsNamedToolChoiceType, - ApiResponseFormat, - AuthenticationType, - ConnectionType, - DoneEvent, - ErrorEvent, - FilePurpose, - FileState, - Frequency, - IncompleteRunDetails, - ListSortOrder, - MessageIncompleteDetailsReason, - MessageRole, - MessageStatus, - MessageStreamEvent, - RunStatus, - RunStepErrorCode, - RunStepStatus, - RunStepStreamEvent, - RunStepType, - RunStreamEvent, - ThreadStreamEvent, - TruncationStrategy, - VectorStoreChunkingStrategyRequestType, - VectorStoreChunkingStrategyResponseType, - VectorStoreExpirationPolicyAnchor, - VectorStoreFileBatchStatus, - VectorStoreFileErrorCode, - VectorStoreFileStatus, - VectorStoreFileStatusFilter, - VectorStoreStatus, - WeekDays, -) -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "Agent", - "AgentDeletionStatus", - "AgentThread", - "AgentThreadCreationOptions", - "AgentsApiResponseFormat", - "AgentsNamedToolChoice", - "ApplicationInsightsConfiguration", - "AzureAISearchResource", - "AzureAISearchToolDefinition", - "BingGroundingToolDefinition", - "CodeInterpreterToolDefinition", - "CodeInterpreterToolResource", - "ConnectionListResource", - "ConnectionResource", - "CronTrigger", - "Dataset", - "Evaluation", - "EvaluationSchedule", - "EvaluatorConfiguration", - "FileContentResponse", - "FileDeletionStatus", - "FileListResponse", - "FileSearchToolDefinition", - "FileSearchToolDefinitionDetails", - "FileSearchToolResource", - "FunctionDefinition", - "FunctionName", - "FunctionToolDefinition", - "IndexResource", - "InputData", - "MessageAttachment", - "MessageContent", - "MessageDelta", - "MessageDeltaChunk", - "MessageDeltaContent", - "MessageDeltaImageFileContent", - "MessageDeltaImageFileContentObject", - "MessageDeltaTextAnnotation", - "MessageDeltaTextContent", - "MessageDeltaTextContentObject", - "MessageDeltaTextFileCitationAnnotation", - "MessageDeltaTextFileCitationAnnotationObject", - "MessageDeltaTextFilePathAnnotation", - "MessageDeltaTextFilePathAnnotationObject", - "MessageImageFileContent", - "MessageImageFileDetails", - "MessageIncompleteDetails", - "MessageTextAnnotation", - "MessageTextContent", - "MessageTextDetails", - "MessageTextFileCitationAnnotation", - "MessageTextFileCitationDetails", - "MessageTextFilePathAnnotation", - "MessageTextFilePathDetails", - "MicrosoftFabricToolDefinition", - "OpenAIFile", - "OpenAIPageableListOfAgent", - "OpenAIPageableListOfRunStep", - "OpenAIPageableListOfThreadMessage", - "OpenAIPageableListOfThreadRun", - "OpenAIPageableListOfVectorStore", - "OpenAIPageableListOfVectorStoreFile", - "RecurrenceSchedule", - "RecurrenceTrigger", - "RequiredAction", - "RequiredFunctionToolCall", - "RequiredFunctionToolCallDetails", - "RequiredToolCall", - 
"RunCompletionUsage", - "RunError", - "RunStep", - "RunStepAzureAISearchToolCall", - "RunStepBingGroundingToolCall", - "RunStepCodeInterpreterImageOutput", - "RunStepCodeInterpreterImageReference", - "RunStepCodeInterpreterLogOutput", - "RunStepCodeInterpreterToolCall", - "RunStepCodeInterpreterToolCallDetails", - "RunStepCodeInterpreterToolCallOutput", - "RunStepCompletionUsage", - "RunStepDelta", - "RunStepDeltaChunk", - "RunStepDeltaCodeInterpreterDetailItemObject", - "RunStepDeltaCodeInterpreterImageOutput", - "RunStepDeltaCodeInterpreterImageOutputObject", - "RunStepDeltaCodeInterpreterLogOutput", - "RunStepDeltaCodeInterpreterOutput", - "RunStepDeltaCodeInterpreterToolCall", - "RunStepDeltaDetail", - "RunStepDeltaFileSearchToolCall", - "RunStepDeltaFunction", - "RunStepDeltaFunctionToolCall", - "RunStepDeltaMessageCreation", - "RunStepDeltaMessageCreationObject", - "RunStepDeltaToolCall", - "RunStepDeltaToolCallObject", - "RunStepDetails", - "RunStepError", - "RunStepFileSearchToolCall", - "RunStepFunctionToolCall", - "RunStepFunctionToolCallDetails", - "RunStepMessageCreationDetails", - "RunStepMessageCreationReference", - "RunStepMicrosoftFabricToolCall", - "RunStepSharepointToolCall", - "RunStepToolCall", - "RunStepToolCallDetails", - "SamplingStrategy", - "SharepointToolDefinition", - "SubmitToolOutputsAction", - "SubmitToolOutputsDetails", - "SystemData", - "ThreadDeletionStatus", - "ThreadMessage", - "ThreadMessageOptions", - "ThreadRun", - "ToolDefinition", - "ToolOutput", - "ToolResources", - "Trigger", - "TruncationObject", - "UpdateCodeInterpreterToolResourceOptions", - "UpdateFileSearchToolResourceOptions", - "UpdateToolResourcesOptions", - "VectorStore", - "VectorStoreAutoChunkingStrategyRequest", - "VectorStoreAutoChunkingStrategyResponse", - "VectorStoreChunkingStrategyRequest", - "VectorStoreChunkingStrategyResponse", - "VectorStoreDeletionStatus", - "VectorStoreExpirationPolicy", - "VectorStoreFile", - "VectorStoreFileBatch", - "VectorStoreFileCount", - "VectorStoreFileDeletionStatus", - "VectorStoreFileError", - "VectorStoreStaticChunkingStrategyOptions", - "VectorStoreStaticChunkingStrategyRequest", - "VectorStoreStaticChunkingStrategyResponse", - "AgentStreamEvent", - "AgentsApiResponseFormatMode", - "AgentsApiToolChoiceOptionMode", - "AgentsNamedToolChoiceType", - "ApiResponseFormat", - "AuthenticationType", - "ConnectionType", - "DoneEvent", - "ErrorEvent", - "FilePurpose", - "FileState", - "Frequency", - "IncompleteRunDetails", - "ListSortOrder", - "MessageIncompleteDetailsReason", - "MessageRole", - "MessageStatus", - "MessageStreamEvent", - "RunStatus", - "RunStepErrorCode", - "RunStepStatus", - "RunStepStreamEvent", - "RunStepType", - "RunStreamEvent", - "ThreadStreamEvent", - "TruncationStrategy", - "VectorStoreChunkingStrategyRequestType", - "VectorStoreChunkingStrategyResponseType", - "VectorStoreExpirationPolicyAnchor", - "VectorStoreFileBatchStatus", - "VectorStoreFileErrorCode", - "VectorStoreFileStatus", - "VectorStoreFileStatusFilter", - "VectorStoreStatus", - "WeekDays", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py deleted file mode 100644 index 7ca731b7639b..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/models/_enums.py +++ /dev/null @@ -1,513 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright 
(c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum -from azure.core import CaseInsensitiveEnumMeta - - -class AgentsApiResponseFormatMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Represents the mode in which the model will handle the return format of a tool call.""" - - AUTO = "auto" - """Default value. Let the model handle the return format.""" - NONE = "none" - """Setting the value to ``none``\\ , will result in a 400 Bad request.""" - - -class AgentsApiToolChoiceOptionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies how the tool choice will be used.""" - - NONE = "none" - """The model will not call a function and instead generates a message.""" - AUTO = "auto" - """The model can pick between generating a message or calling a function.""" - - -class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Available tool types for agents named tools.""" - - FUNCTION = "function" - """Tool type ``function``""" - CODE_INTERPRETER = "code_interpreter" - """Tool type ``code_interpreter``""" - FILE_SEARCH = "file_search" - """Tool type ``file_search``""" - BING_GROUNDING = "bing_grounding" - """Tool type ``bing_grounding``""" - MICROSOFT_FABRIC = "microsoft_fabric" - """Tool type ``microsoft_fabric``""" - SHAREPOINT = "sharepoint" - """Tool type ``sharepoint``""" - AZURE_AI_SEARCH = "azure_ai_search" - """Tool type ``azure_ai_search``""" - - -class AgentStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Each event in a server-sent events stream has an ``event`` and ``data`` property: - - .. code-block:: - - event: thread.created - data: {"id": "thread_123", "object": "thread", ...} - - We emit events whenever a new object is created, transitions to a new state, or is being - streamed in parts (deltas). For example, we emit ``thread.run.created`` when a new run - is created, ``thread.run.completed`` when a run completes, and so on. When an Agent chooses - to create a message during a run, we emit a ``thread.message.created event``\\ , a - ``thread.message.in_progress`` event, many ``thread.message.delta`` events, and finally a - ``thread.message.completed`` event. - - We may add additional events over time, so we recommend handling unknown events gracefully - in your code. - """ - - THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. The data of this event is of type AgentThread""" - THREAD_RUN_CREATED = "thread.run.created" - """Event sent when a new run is created. The data of this event is of type ThreadRun""" - THREAD_RUN_QUEUED = "thread.run.queued" - """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" - THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" - """Event sent when a run moves to ``in_progress`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" - """Event sent when a run moves to ``requires_action`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_COMPLETED = "thread.run.completed" - """Event sent when a run is completed. 
The data of this event is of type ThreadRun""" - THREAD_RUN_FAILED = "thread.run.failed" - """Event sent when a run fails. The data of this event is of type ThreadRun""" - THREAD_RUN_CANCELLING = "thread.run.cancelling" - """Event sent when a run moves to ``cancelling`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_CANCELLED = "thread.run.cancelled" - """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" - THREAD_RUN_EXPIRED = "thread.run.expired" - """Event sent when a run is expired. The data of this event is of type ThreadRun""" - THREAD_RUN_STEP_CREATED = "thread.run.step.created" - """Event sent when a new thread run step is created. The data of this event is of type RunStep""" - THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" - """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type - RunStep""" - THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run step is being streamed. The data of this event is of type - RunStepDeltaChunk""" - THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" - """Event sent when a run step is completed. The data of this event is of type RunStep""" - THREAD_RUN_STEP_FAILED = "thread.run.step.failed" - """Event sent when a run step fails. The data of this event is of type RunStep""" - THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" - """Event sent when a run step is cancelled. The data of this event is of type RunStep""" - THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" - """Event sent when a run step is expired. The data of this event is of type RunStep""" - THREAD_MESSAGE_CREATED = "thread.message.created" - """Event sent when a new message is created. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" - """Event sent when a message moves to ``in_progress`` status. The data of this event is of type - ThreadMessage""" - THREAD_MESSAGE_DELTA = "thread.message.delta" - """Event sent when a message is being streamed. The data of this event is of type - MessageDeltaChunk""" - THREAD_MESSAGE_COMPLETED = "thread.message.completed" - """Event sent when a message is completed. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" - """Event sent before a message is completed. 
The data of this event is of type ThreadMessage"""
-    ERROR = "error"
-    """Event sent when an error occurs, such as an internal server error or a timeout."""
-    DONE = "done"
-    """Event sent when the stream is done."""
-
-
-class ApiResponseFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """Possible API response formats."""
-
-    TEXT = "text"
-    """``text`` format should be used for requests involving any sort of ToolCall."""
-    JSON_OBJECT = "json_object"
-    """Using ``json_object`` format will limit the usage of ToolCall to only functions."""
-
-
-class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """Authentication type used by Azure AI service to connect to another service."""
-
-    API_KEY = "ApiKey"
-    """API Key authentication"""
-    AAD = "AAD"
-    """Entra ID authentication"""
-    SAS = "SAS"
-    """Shared Access Signature (SAS) authentication"""
-
-
-class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """The Type (or category) of the connection."""
-
-    AZURE_OPEN_AI = "AzureOpenAI"
-    """Azure OpenAI service"""
-    SERVERLESS = "Serverless"
-    """Serverless API service"""
-    AZURE_BLOB_STORAGE = "AzureBlob"
-    """Azure Blob Storage"""
-    AI_SERVICES = "AIServices"
-    """Azure AI Services"""
-
-
-class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """Terminal event indicating the successful end of a stream."""
-
-    DONE = "done"
-    """Event sent when the stream is done."""
-
-
-class ErrorEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """Terminal event indicating a server side error while streaming."""
-
-    ERROR = "error"
-    """Event sent when an error occurs, such as an internal server error or a timeout."""
-
-
-class FilePurpose(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """The possible values denoting the intended usage of a file."""
-
-    FINE_TUNE = "fine-tune"
-    """Indicates a file is used for fine tuning input."""
-    FINE_TUNE_RESULTS = "fine-tune-results"
-    """Indicates a file is used for fine tuning results."""
-    AGENTS = "assistants"
-    """Indicates a file is used as input to agents."""
-    AGENTS_OUTPUT = "assistants_output"
-    """Indicates a file is used as output by agents."""
-    BATCH = "batch"
-    """Indicates a file is used as input to a batch operation."""
-    BATCH_OUTPUT = "batch_output"
-    """Indicates a file is used as output by a vector store batch operation."""
-    VISION = "vision"
-    """Indicates a file is used as input to a vision operation."""
-
-
-class FileState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-    """The state of the file."""
-
-    UPLOADED = "uploaded"
-    """The file has been uploaded but it's not yet processed. This state is not returned by Azure
-    OpenAI and is exposed only for compatibility. It can be categorized as an inactive state."""
-    PENDING = "pending"
-    """The operation was created and is not queued to be processed in the future. It can be
-    categorized as an inactive state."""
-    RUNNING = "running"
-    """The operation has started to be processed. It can be categorized as an active state."""
-    PROCESSED = "processed"
-    """The operation has been successfully processed and is ready for consumption. It can be
-    categorized as a terminal state."""
-    ERROR = "error"
-    """The operation has completed processing with a failure and cannot be further consumed. It can be
-    categorized as a terminal state."""
-    DELETING = "deleting"
-    """The entity is in the process of being deleted. This state is not returned by Azure OpenAI and
-    is exposed only for compatibility.
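These enums are ``str`` subclasses built on ``azure.core``'s ``CaseInsensitiveEnumMeta``, so member-name lookup ignores case and members compare equal to their wire values. A small sketch; the terminal-state helper is illustrative, not part of the SDK:

.. code-block:: python

    from azure.ai.project.models import FilePurpose, FileState

    assert FilePurpose["agents"] is FilePurpose.AGENTS  # member-name lookup is case-insensitive
    assert FilePurpose.AGENTS == "assistants"           # str enum equals its wire value

    # Illustrative helper: normalize a raw wire-format status string through the enum first.
    TERMINAL_FILE_STATES = {FileState.PROCESSED, FileState.ERROR, FileState.DELETED}

    def is_terminal(raw_state: str) -> bool:
        return FileState(raw_state) in TERMINAL_FILE_STATES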
- It can be categorized as an active state.""" - DELETED = "deleted" - """The entity has been deleted but may still be referenced by other entities predating the - deletion. It can be categorized as a - terminal state.""" - - -class Frequency(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Frequency of the schedule - day, week, month, hour, minute.""" - - MONTH = "Month" - WEEK = "Week" - DAY = "Day" - HOUR = "Hour" - MINUTE = "Minute" - - -class IncompleteRunDetails(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The reason why the run is incomplete. This will point to which specific token limit was reached - over the course of the run. - """ - - MAX_COMPLETION_TOKENS = "max_completion_tokens" - """Maximum completion tokens exceeded""" - MAX_PROMPT_TOKENS = "max_prompt_tokens" - """Maximum prompt tokens exceeded""" - - -class ListSortOrder(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The available sorting options when requesting a list of response objects.""" - - ASCENDING = "asc" - """Specifies an ascending sort order.""" - DESCENDING = "desc" - """Specifies a descending sort order.""" - - -class MessageIncompleteDetailsReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A set of reasons describing why a message is marked as incomplete.""" - - CONTENT_FILTER = "content_filter" - """The run generating the message was terminated due to content filter flagging.""" - MAX_TOKENS = "max_tokens" - """The run generating the message exhausted available tokens before completion.""" - RUN_CANCELLED = "run_cancelled" - """The run generating the message was cancelled before completion.""" - RUN_FAILED = "run_failed" - """The run generating the message failed.""" - RUN_EXPIRED = "run_expired" - """The run generating the message expired.""" - - -class MessageRole(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible values for roles attributed to messages in a thread.""" - - USER = "user" - """The role representing the end-user.""" - AGENT = "assistant" - """The role representing the agent.""" - - -class MessageStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible execution status values for a thread message.""" - - IN_PROGRESS = "in_progress" - """A run is currently creating this message.""" - INCOMPLETE = "incomplete" - """This message is incomplete. See incomplete_details for more information.""" - COMPLETED = "completed" - """This message was successfully completed by a run.""" - - -class MessageStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Message operation related streaming events.""" - - THREAD_MESSAGE_CREATED = "thread.message.created" - """Event sent when a new message is created. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_IN_PROGRESS = "thread.message.in_progress" - """Event sent when a message moves to ``in_progress`` status. The data of this event is of type - ThreadMessage""" - THREAD_MESSAGE_DELTA = "thread.message.delta" - """Event sent when a message is being streamed. The data of this event is of type - MessageDeltaChunk""" - THREAD_MESSAGE_COMPLETED = "thread.message.completed" - """Event sent when a message is completed. The data of this event is of type ThreadMessage""" - THREAD_MESSAGE_INCOMPLETE = "thread.message.incomplete" - """Event sent before a message is completed. 
The data of this event is of type ThreadMessage""" - - -class RunStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible values for the status of an agent thread run.""" - - QUEUED = "queued" - """Represents a run that is queued to start.""" - IN_PROGRESS = "in_progress" - """Represents a run that is in progress.""" - REQUIRES_ACTION = "requires_action" - """Represents a run that needs another operation, such as tool output submission, to continue.""" - CANCELLING = "cancelling" - """Represents a run that is in the process of cancellation.""" - CANCELLED = "cancelled" - """Represents a run that has been cancelled.""" - FAILED = "failed" - """Represents a run that failed.""" - COMPLETED = "completed" - """Represents a run that successfully completed.""" - EXPIRED = "expired" - """Represents a run that expired before it could otherwise finish.""" - - -class RunStepErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible error code values attributable to a failed run step.""" - - SERVER_ERROR = "server_error" - """Represents a server error.""" - RATE_LIMIT_EXCEEDED = "rate_limit_exceeded" - """Represents an error indicating configured rate limits were exceeded.""" - - -class RunStepStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible values for the status of a run step.""" - - IN_PROGRESS = "in_progress" - """Represents a run step still in progress.""" - CANCELLED = "cancelled" - """Represents a run step that was cancelled.""" - FAILED = "failed" - """Represents a run step that failed.""" - COMPLETED = "completed" - """Represents a run step that successfully completed.""" - EXPIRED = "expired" - """Represents a run step that expired before otherwise finishing.""" - - -class RunStepStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Run step operation related streaming events.""" - - THREAD_RUN_STEP_CREATED = "thread.run.step.created" - """Event sent when a new thread run step is created. The data of this event is of type RunStep""" - THREAD_RUN_STEP_IN_PROGRESS = "thread.run.step.in_progress" - """Event sent when a run step moves to ``in_progress`` status. The data of this event is of type - RunStep""" - THREAD_RUN_STEP_DELTA = "thread.run.step.delta" - """Event sent when a run step is being streamed. The data of this event is of type - RunStepDeltaChunk""" - THREAD_RUN_STEP_COMPLETED = "thread.run.step.completed" - """Event sent when a run step is completed. The data of this event is of type RunStep""" - THREAD_RUN_STEP_FAILED = "thread.run.step.failed" - """Event sent when a run step fails. The data of this event is of type RunStep""" - THREAD_RUN_STEP_CANCELLED = "thread.run.step.cancelled" - """Event sent when a run step is cancelled. The data of this event is of type RunStep""" - THREAD_RUN_STEP_EXPIRED = "thread.run.step.expired" - """Event sent when a run step is expired. The data of this event is of type RunStep""" - - -class RunStepType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The possible types of run steps.""" - - MESSAGE_CREATION = "message_creation" - """Represents a run step to create a message.""" - TOOL_CALLS = "tool_calls" - """Represents a run step that calls tools.""" - - -class RunStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Run operation related streaming events.""" - - THREAD_RUN_CREATED = "thread.run.created" - """Event sent when a new run is created. 
The data of this event is of type ThreadRun""" - THREAD_RUN_QUEUED = "thread.run.queued" - """Event sent when a run moves to ``queued`` status. The data of this event is of type ThreadRun""" - THREAD_RUN_IN_PROGRESS = "thread.run.in_progress" - """Event sent when a run moves to ``in_progress`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_REQUIRES_ACTION = "thread.run.requires_action" - """Event sent when a run moves to ``requires_action`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_COMPLETED = "thread.run.completed" - """Event sent when a run is completed. The data of this event is of type ThreadRun""" - THREAD_RUN_FAILED = "thread.run.failed" - """Event sent when a run fails. The data of this event is of type ThreadRun""" - THREAD_RUN_CANCELLING = "thread.run.cancelling" - """Event sent when a run moves to ``cancelling`` status. The data of this event is of type - ThreadRun""" - THREAD_RUN_CANCELLED = "thread.run.cancelled" - """Event sent when a run is cancelled. The data of this event is of type ThreadRun""" - THREAD_RUN_EXPIRED = "thread.run.expired" - """Event sent when a run is expired. The data of this event is of type ThreadRun""" - - -class ThreadStreamEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Thread operation related streaming events.""" - - THREAD_CREATED = "thread.created" - """Event sent when a new thread is created. The data of this event is of type AgentThread""" - - -class TruncationStrategy(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Possible truncation strategies for the thread.""" - - AUTO = "auto" - """Default value. Messages in the middle of the thread will be dropped to fit the context length - of the model.""" - LAST_MESSAGES = "last_messages" - """The thread will truncate to the ``lastMessages`` count of recent messages.""" - - -class VectorStoreChunkingStrategyRequestType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of chunking strategy.""" - - AUTO = "auto" - STATIC = "static" - - -class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of chunking strategy.""" - - OTHER = "other" - STATIC = "static" - - -class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Describes the relationship between the days and the expiration of this vector store.""" - - LAST_ACTIVE_AT = "last_active_at" - """The expiration policy is based on the last time the vector store was active.""" - - -class VectorStoreFileBatchStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The status of the vector store file batch.""" - - IN_PROGRESS = "in_progress" - """The vector store is still processing this file batch.""" - COMPLETED = "completed" - """the vector store file batch is ready for use.""" - CANCELLED = "cancelled" - """The vector store file batch was cancelled.""" - FAILED = "failed" - """The vector store file batch failed to process.""" - - -class VectorStoreFileErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Error code variants for vector store file processing.""" - - INTERNAL_ERROR = "internal_error" - """An internal error occurred.""" - FILE_NOT_FOUND = "file_not_found" - """The file was not found.""" - PARSING_ERROR = "parsing_error" - """The file could not be parsed.""" - UNHANDLED_MIME_TYPE = "unhandled_mime_type" - """The file has an unhandled mime type.""" - - -class VectorStoreFileStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Vector store file status.""" - - IN_PROGRESS = 
"in_progress" - """The file is currently being processed.""" - COMPLETED = "completed" - """The file has been successfully processed.""" - FAILED = "failed" - """The file has failed to process.""" - CANCELLED = "cancelled" - """The file was cancelled.""" - - -class VectorStoreFileStatusFilter(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Query parameter filter for vector store file retrieval endpoint.""" - - IN_PROGRESS = "in_progress" - """Retrieve only files that are currently being processed""" - COMPLETED = "completed" - """Retrieve only files that have been successfully processed""" - FAILED = "failed" - """Retrieve only files that have failed to process""" - CANCELLED = "cancelled" - """Retrieve only files that were cancelled""" - - -class VectorStoreStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Vector store possible status.""" - - EXPIRED = "expired" - """expired status indicates that this vector store has expired and is no longer available for use.""" - IN_PROGRESS = "in_progress" - """in_progress status indicates that this vector store is still processing files.""" - COMPLETED = "completed" - """completed status indicates that this vector store is ready for use.""" - - -class WeekDays(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """WeekDay of the schedule - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday.""" - - MONDAY = "Monday" - TUESDAY = "Tuesday" - WEDNESDAY = "Wednesday" - THURSDAY = "Thursday" - FRIDAY = "Friday" - SATURDAY = "Saturday" - SUNDAY = "Sunday" diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py deleted file mode 100644 index 45f9b650cfd1..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/models/_models.py +++ /dev/null @@ -1,6105 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=useless-super-delegation - -import datetime -from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload - -from .. import _model_base -from .._model_base import rest_discriminator, rest_field -from ._enums import ( - AuthenticationType, - RunStepType, - VectorStoreChunkingStrategyRequestType, - VectorStoreChunkingStrategyResponseType, -) - -if TYPE_CHECKING: - from .. import _types, models as _models - - -class Agent(_model_base.Model): - """Represents an agent that can call the model and use tools. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always assistant. Required. Default value is - "assistant". - :vartype object: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar name: The name of the agent. Required. - :vartype name: str - :ivar description: The description of the agent. Required. 
- :vartype description: str - :ivar model: The ID of the model to use. Required. - :vartype model: str - :ivar instructions: The system instructions for the agent to use. Required. - :vartype instructions: str - :ivar tools: The collection of tools enabled for the agent. Required. - :vartype tools: list[~azure.ai.project.models.ToolDefinition] - :ivar tool_resources: A set of resources that are used by the agent's tools. The resources are - specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Required. - :vartype tool_resources: ~azure.ai.project.models.ToolResources - :ivar temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Required. - :vartype temperature: float - :ivar top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Required. - :vartype top_p: float - :ivar response_format: The response format of the tool calls used by this agent. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat - :vartype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode or - ~azure.ai.project.models.AgentsApiResponseFormat - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["assistant"] = rest_field() - """The object type, which is always assistant. Required. Default value is \"assistant\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - name: str = rest_field() - """The name of the agent. Required.""" - description: str = rest_field() - """The description of the agent. Required.""" - model: str = rest_field() - """The ID of the model to use. Required.""" - instructions: str = rest_field() - """The system instructions for the agent to use. Required.""" - tools: List["_models.ToolDefinition"] = rest_field() - """The collection of tools enabled for the agent. Required.""" - tool_resources: "_models.ToolResources" = rest_field() - """A set of resources that are used by the agent's tools. The resources are specific to the type - of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Required.""" - temperature: float = rest_field() - """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output - more random, - while lower values like 0.2 will make it more focused and deterministic. 
Required.""" - top_p: float = rest_field() - """An alternative to sampling with temperature, called nucleus sampling, where the model considers - the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Required.""" - response_format: Optional["_types.AgentsApiResponseFormatOption"] = rest_field() - """The response format of the tool calls used by this agent. Is one of the following types: str, - Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - name: str, - description: str, - model: str, - instructions: str, - tools: List["_models.ToolDefinition"], - tool_resources: "_models.ToolResources", - temperature: float, - top_p: float, - metadata: Dict[str, str], - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["assistant"] = "assistant" - - -class AgentDeletionStatus(_model_base.Model): - """The status of an agent deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'assistant.deleted'. Required. Default value is - "assistant.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["assistant.deleted"] = rest_field() - """The object type, which is always 'assistant.deleted'. Required. Default value is - \"assistant.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["assistant.deleted"] = "assistant.deleted" - - -class AgentsApiResponseFormat(_model_base.Model): - """An object describing the expected output of the model. If ``json_object`` only ``function`` - type ``tools`` are allowed to be passed to the Run. - If ``text`` the model can return text or any value needed. - - :ivar type: Must be one of ``text`` or ``json_object``. Known values are: "text" and - "json_object". 
- :vartype type: str or ~azure.ai.project.models.ApiResponseFormat
- """
-
- type: Optional[Union[str, "_models.ApiResponseFormat"]] = rest_field()
- """Must be one of ``text`` or ``json_object``. Known values are: \"text\" and \"json_object\"."""
-
- @overload
- def __init__(
- self,
- *,
- type: Optional[Union[str, "_models.ApiResponseFormat"]] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class AgentsNamedToolChoice(_model_base.Model):
- """Specifies a tool the model should use. Use to force the model to call a specific tool.
-
-
- :ivar type: The type of tool. If type is ``function``\\ , the function name must be set.
- Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding",
- "microsoft_fabric", "sharepoint", and "azure_ai_search".
- :vartype type: str or ~azure.ai.project.models.AgentsNamedToolChoiceType
- :ivar function: The name of the function to call.
- :vartype function: ~azure.ai.project.models.FunctionName
- """
-
- type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field()
- """The type of tool. If type is ``function``\ , the function name must be set. Required. Known
- values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\",
- \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\"."""
- function: Optional["_models.FunctionName"] = rest_field()
- """The name of the function to call."""
-
- @overload
- def __init__(
- self,
- *,
- type: Union[str, "_models.AgentsNamedToolChoiceType"],
- function: Optional["_models.FunctionName"] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class AgentThread(_model_base.Model):
- """Information about a single thread associated with an agent.
-
- Readonly variables are only populated by the server, and will be ignored when sending a request.
-
-
- :ivar id: The identifier, which can be referenced in API endpoints. Required.
- :vartype id: str
- :ivar object: The object type, which is always 'thread'. Required. Default value is "thread".
- :vartype object: str
- :ivar created_at: The Unix timestamp, in seconds, representing when this object was created.
- Required.
- :vartype created_at: ~datetime.datetime
- :ivar tool_resources: A set of resources that are made available to the agent's tools in this
- thread. The resources are specific to the type
- of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the
- ``file_search`` tool requires a list
- of vector store IDs. Required.
- :vartype tool_resources: ~azure.ai.project.models.ToolResources
- :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for
- storing additional information about that object in a structured format. Keys may be up to 64
- characters in length and values may be up to 512 characters in length. Required.
- :vartype metadata: dict[str, str]
- """
-
- id: str = rest_field()
- """The identifier, which can be referenced in API endpoints.
Required.""" - object: Literal["thread"] = rest_field() - """The object type, which is always 'thread'. Required. Default value is \"thread\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - tool_resources: "_models.ToolResources" = rest_field() - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the type - of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires a list - of vector store IDs. Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - tool_resources: "_models.ToolResources", - metadata: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread"] = "thread" - - -class AgentThreadCreationOptions(_model_base.Model): - """The details used to create a new agent thread. - - :ivar messages: The initial messages to associate with the new thread. - :vartype messages: list[~azure.ai.project.models.ThreadMessageOptions] - :ivar tool_resources: A set of resources that are made available to the agent's tools in this - thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. - :vartype tool_resources: ~azure.ai.project.models.ToolResources - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. - :vartype metadata: dict[str, str] - """ - - messages: Optional[List["_models.ThreadMessageOptions"]] = rest_field() - """The initial messages to associate with the new thread.""" - tool_resources: Optional["_models.ToolResources"] = rest_field() - """A set of resources that are made available to the agent's tools in this thread. The resources - are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires - a list of vector store IDs.""" - metadata: Optional[Dict[str, str]] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length.""" - - @overload - def __init__( - self, - *, - messages: Optional[List["_models.ThreadMessageOptions"]] = None, - tool_resources: Optional["_models.ToolResources"] = None, - metadata: Optional[Dict[str, str]] = None, - ) -> None: ... 
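-
- # Illustrative only, not part of the generated model: a minimal sketch of how these
- # thread-creation options might be populated. The ThreadMessageOptions fields shown
- # ("role", "content") are assumptions; see that model's definition for the actual shape.
- #
- #   options = AgentThreadCreationOptions(
- #       messages=[ThreadMessageOptions(role="user", content="Hello")],
- #       metadata={"session": "demo"},  # up to 16 key/value pairs
- #   )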
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class InputData(_model_base.Model): - """Abstract data class for input data configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ApplicationInsightsConfiguration, Dataset - - - :ivar type: Type of the data. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """Type of the data. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ApplicationInsightsConfiguration(InputData, discriminator="app_insights"): - """Data Source for Application Insights. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "app_insights". - :vartype type: str - :ivar resource_id: LogAnalytic Workspace resourceID associated with ApplicationInsights. - Required. - :vartype resource_id: str - :ivar query: Query to fetch the data. Required. - :vartype query: str - :ivar service_name: Service name. Required. - :vartype service_name: str - :ivar connection_string: Connection String to connect to ApplicationInsights. - :vartype connection_string: str - """ - - type: Literal["app_insights"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"app_insights\".""" - resource_id: str = rest_field(name="resourceId") - """LogAnalytic Workspace resourceID associated with ApplicationInsights. Required.""" - query: str = rest_field() - """Query to fetch the data. Required.""" - service_name: str = rest_field(name="serviceName") - """Service name. Required.""" - connection_string: Optional[str] = rest_field(name="connectionString") - """Connection String to connect to ApplicationInsights.""" - - @overload - def __init__( - self, - *, - resource_id: str, - query: str, - service_name: str, - connection_string: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="app_insights", **kwargs) - - -class AzureAISearchResource(_model_base.Model): - """A set of index resources used by the ``azure_ai_search`` tool. - - :ivar index_list: The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent. - :vartype index_list: list[~azure.ai.project.models.IndexResource] - """ - - index_list: Optional[List["_models.IndexResource"]] = rest_field(name="indexes") - """The indices attached to this agent. There can be a maximum of 1 index - resource attached to the agent.""" - - @overload - def __init__( - self, - *, - index_list: Optional[List["_models.IndexResource"]] = None, - ) -> None: ... 
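-
- # Illustrative only, not part of the generated model: at most one index resource can be
- # attached, referenced by connection ID and index name via IndexResource (defined later
- # in this module). The values below are placeholders.
- #
- #   search_resource = AzureAISearchResource(
- #       index_list=[IndexResource(index_connection_id="<connection-id>", index_name="<index-name>")]
- #   )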
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ToolDefinition(_model_base.Model): - """An abstract representation of an input tool definition that an agent can use. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AzureAISearchToolDefinition, BingGroundingToolDefinition, CodeInterpreterToolDefinition, - FileSearchToolDefinition, FunctionToolDefinition, MicrosoftFabricToolDefinition, - SharepointToolDefinition - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class AzureAISearchToolDefinition(ToolDefinition, discriminator="azure_ai_search"): - """The input definition information for an Azure AI search tool as used to configure an agent. - - - :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is - "azure_ai_search". - :vartype type: str - """ - - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'azure_ai_search'. Required. Default value is - \"azure_ai_search\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="azure_ai_search", **kwargs) - - -class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding"): - """The input definition information for a bing grounding search tool as used to configure an - agent. - - - :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is - "bing_grounding". - :vartype type: str - """ - - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_grounding'. Required. Default value is - \"bing_grounding\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="bing_grounding", **kwargs) - - -class CodeInterpreterToolDefinition(ToolDefinition, discriminator="code_interpreter"): - """The input definition information for a code interpreter tool as used to configure an agent. - - - :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is - "code_interpreter". - :vartype type: str - """ - - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'code_interpreter'. Required. 
Default value is - \"code_interpreter\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="code_interpreter", **kwargs) - - -class CodeInterpreterToolResource(_model_base.Model): - """A set of resources that are used by the ``code_interpreter`` tool. - - :ivar file_ids: A list of file IDs made available to the ``code_interpreter`` tool. There can - be a maximum of 20 files - associated with the tool. - :vartype file_ids: list[str] - """ - - file_ids: Optional[List[str]] = rest_field() - """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of - 20 files - associated with the tool.""" - - @overload - def __init__( - self, - *, - file_ids: Optional[List[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ConnectionListResource(_model_base.Model): - """A set of connection resources currently used by either the ``bing_grounding``\\ , - ``microsoft_fabric``\\ , or ``sharepoint`` tools. - - :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 - connection - resource attached to the agent. - :vartype connection_list: list[~azure.ai.project.models.ConnectionResource] - """ - - connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") - """The connections attached to this agent. There can be a maximum of 1 connection - resource attached to the agent.""" - - @overload - def __init__( - self, - *, - connection_list: Optional[List["_models.ConnectionResource"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ConnectionProperties(_model_base.Model): - """Connection properties. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth - - - :ivar auth_type: Authentication type of the connection target. Required. Known values are: - "ApiKey", "AAD", and "SAS". - :vartype auth_type: str or ~azure.ai.project.models.AuthenticationType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - auth_type: str = rest_discriminator(name="authType") - """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" - - -class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): - """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ - ). - - - :ivar auth_type: Authentication type of the connection target. Required. Entra ID - authentication - :vartype auth_type: str or ~azure.ai.project.models.AAD - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". 
- :vartype category: str or ~azure.ai.project.models.ConnectionType
- :ivar target: The connection URL to be used for this service. Required.
- :vartype target: str
- """
-
- auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore
- """Authentication type of the connection target. Required. Entra ID authentication"""
- category: Union[str, "_models.ConnectionType"] = rest_field()
- """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
- \"AzureBlob\", and \"AIServices\"."""
- target: str = rest_field()
- """The connection URL to be used for this service. Required."""
-
-
- class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"):
- """Connection properties for connections with API key authentication.
-
-
- :ivar auth_type: Authentication type of the connection target. Required. API Key authentication
- :vartype auth_type: str or ~azure.ai.project.models.API_KEY
- :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI",
- "Serverless", "AzureBlob", and "AIServices".
- :vartype category: str or ~azure.ai.project.models.ConnectionType
- :ivar credentials: Credentials will only be present for authType=ApiKey. Required.
- :vartype credentials: ~azure.ai.project.models._models.CredentialsApiKeyAuth
- :ivar target: The connection URL to be used for this service. Required.
- :vartype target: str
- """
-
- auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore
- """Authentication type of the connection target. Required. API Key authentication"""
- category: Union[str, "_models.ConnectionType"] = rest_field()
- """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
- \"AzureBlob\", and \"AIServices\"."""
- credentials: "_models._models.CredentialsApiKeyAuth" = rest_field()
- """Credentials will only be present for authType=ApiKey. Required."""
- target: str = rest_field()
- """The connection URL to be used for this service. Required."""
-
-
- class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"):
- """Connection properties for connections with SAS authentication.
-
-
- :ivar auth_type: Authentication type of the connection target. Required. Shared Access
- Signature (SAS) authentication
- :vartype auth_type: str or ~azure.ai.project.models.SAS
- :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI",
- "Serverless", "AzureBlob", and "AIServices".
- :vartype category: str or ~azure.ai.project.models.ConnectionType
- :ivar credentials: Credentials will only be present for authType=SAS. Required.
- :vartype credentials: ~azure.ai.project.models._models.CredentialsSASAuth
- :ivar target: The connection URL to be used for this service. Required.
- :vartype target: str
- """
-
- auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore
- """Authentication type of the connection target. Required. Shared Access Signature (SAS)
- authentication"""
- category: Union[str, "_models.ConnectionType"] = rest_field()
- """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\",
- \"AzureBlob\", and \"AIServices\"."""
- credentials: "_models._models.CredentialsSASAuth" = rest_field()
- """Credentials will only be present for authType=SAS. Required."""
- target: str = rest_field()
- """The connection URL to be used for this service. Required."""
-
-
- class ConnectionResource(_model_base.Model):
- """A connection resource.
-
-
- :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required.
- :vartype connection_id: str
- """
-
- connection_id: str = rest_field()
- """A connection in a ConnectionListResource attached to this agent. Required."""
-
- @overload
- def __init__(
- self,
- *,
- connection_id: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class ConnectionsListResponse(_model_base.Model):
- """Response from the list operation.
-
-
- :ivar value: A list of connection list secrets. Required.
- :vartype value: list[~azure.ai.project.models._models.ConnectionsListSecretsResponse]
- """
-
- value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field()
- """A list of connection list secrets. Required."""
-
-
- class ConnectionsListSecretsResponse(_model_base.Model):
- """Response from the listSecrets operation.
-
-
- :ivar id: A unique identifier for the connection. Required.
- :vartype id: str
- :ivar name: The name of the resource. Required.
- :vartype name: str
- :ivar properties: The properties of the resource. Required.
- :vartype properties: ~azure.ai.project.models._models.ConnectionProperties
- """
-
- id: str = rest_field()
- """A unique identifier for the connection. Required."""
- name: str = rest_field()
- """The name of the resource. Required."""
- properties: "_models._models.ConnectionProperties" = rest_field()
- """The properties of the resource. Required."""
-
-
- class CredentialsApiKeyAuth(_model_base.Model):
- """The credentials needed for API key authentication.
-
-
- :ivar key: The API key. Required.
- :vartype key: str
- """
-
- key: str = rest_field()
- """The API key. Required."""
-
-
- class CredentialsSASAuth(_model_base.Model):
- """The credentials needed for Shared Access Signatures (SAS) authentication.
-
-
- :ivar sas: The Shared Access Signatures (SAS) token. Required.
- :vartype sas: str
- """
-
- sas: str = rest_field(name="SAS")
- """The Shared Access Signatures (SAS) token. Required."""
-
-
- class Trigger(_model_base.Model):
- """Abstract data class for trigger configuration.
-
- You probably want to use the sub-classes and not this class directly. Known sub-classes are:
- CronTrigger, RecurrenceTrigger
-
-
- :ivar type: Type of the trigger. Required. Default value is None.
- :vartype type: str
- """
-
- __mapping__: Dict[str, _model_base.Model] = {}
- type: str = rest_discriminator(name="type")
- """Type of the trigger. Required. Default value is None."""
-
- @overload
- def __init__(
- self,
- *,
- type: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class CronTrigger(Trigger, discriminator="Cron"):
- """Cron Trigger Definition.
-
- Readonly variables are only populated by the server, and will be ignored when sending a request.
-
-
- :ivar type: Required. Default value is "Cron".
- :vartype type: str
- :ivar expression: Cron expression for the trigger. Required.
- :vartype expression: str - """ - - type: Literal["Cron"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"Cron\".""" - expression: str = rest_field() - """Cron expression for the trigger. Required.""" - - @overload - def __init__( - self, - *, - expression: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="Cron", **kwargs) - - -class Dataset(InputData, discriminator="dataset"): - """Dataset as source for evaluation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "dataset". - :vartype type: str - :ivar id: Evaluation input data. Required. - :vartype id: str - """ - - type: Literal["dataset"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"dataset\".""" - id: str = rest_field() - """Evaluation input data. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="dataset", **kwargs) - - -class Evaluation(_model_base.Model): - """Evaluation Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: Identifier of the evaluation. Required. - :vartype id: str - :ivar data: Data for evaluation. Required. - :vartype data: ~azure.ai.project.models.InputData - :ivar display_name: Display Name for evaluation. It helps to find evaluation easily in AI - Studio. It does not need to be unique. - :vartype display_name: str - :ivar description: Description of the evaluation. It can be used to store additional - information about the evaluation and is mutable. - :vartype description: str - :ivar system_data: Metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.ai.project.models.SystemData - :ivar status: Status of the evaluation. It is set by service and is read-only. - :vartype status: str - :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. - :vartype tags: dict[str, str] - :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a - property cannot be removed. - :vartype properties: dict[str, str] - :ivar evaluators: Evaluators to be used for the evaluation. Required. - :vartype evaluators: dict[str, ~azure.ai.project.models.EvaluatorConfiguration] - """ - - id: str = rest_field(visibility=["read"]) - """Identifier of the evaluation. Required.""" - data: "_models.InputData" = rest_field(visibility=["read", "create"]) - """Data for evaluation. Required.""" - display_name: Optional[str] = rest_field(name="displayName") - """Display Name for evaluation. It helps to find evaluation easily in AI Studio. It does not need - to be unique.""" - description: Optional[str] = rest_field() - """Description of the evaluation. 
It can be used to store additional information about the - evaluation and is mutable.""" - system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) - """Metadata containing createdBy and modifiedBy information.""" - status: Optional[str] = rest_field(visibility=["read"]) - """Status of the evaluation. It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() - """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) - """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be - removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) - """Evaluators to be used for the evaluation. Required.""" - - @overload - def __init__( - self, - *, - data: "_models.InputData", - evaluators: Dict[str, "_models.EvaluatorConfiguration"], - display_name: Optional[str] = None, - description: Optional[str] = None, - tags: Optional[Dict[str, str]] = None, - properties: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class EvaluationSchedule(_model_base.Model): - """Evaluation Schedule Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :vartype name: str - :ivar data: Data for evaluation. Required. - :vartype data: ~azure.ai.project.models.ApplicationInsightsConfiguration - :ivar description: Description of the evaluation. It can be used to store additional - information about the evaluation and is mutable. - :vartype description: str - :ivar system_data: Metadata containing createdBy and modifiedBy information. - :vartype system_data: ~azure.ai.project.models.SystemData - :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. - :vartype provisioning_status: str - :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. - :vartype tags: dict[str, str] - :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a - property cannot be removed. - :vartype properties: dict[str, str] - :ivar evaluators: Evaluators to be used for the evaluation. Required. - :vartype evaluators: dict[str, ~azure.ai.project.models.EvaluatorConfiguration] - :ivar trigger: Trigger for the evaluation. Required. - :vartype trigger: ~azure.ai.project.models.Trigger - :ivar sampling_strategy: Sampling strategy for the evaluation. Required. - :vartype sampling_strategy: ~azure.ai.project.models.SamplingStrategy - """ - - name: str = rest_field(visibility=["read"]) - """Name of the schedule, which also serves as the unique identifier for the evaluation. Required.""" - data: "_models.ApplicationInsightsConfiguration" = rest_field(visibility=["read", "create"]) - """Data for evaluation. Required.""" - description: Optional[str] = rest_field() - """Description of the evaluation. 
It can be used to store additional information about the - evaluation and is mutable.""" - system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) - """Metadata containing createdBy and modifiedBy information.""" - provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"]) - """Status of the evaluation. It is set by service and is read-only.""" - tags: Optional[Dict[str, str]] = rest_field() - """Evaluation's tags. Unlike properties, tags are fully mutable.""" - properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) - """Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be - removed.""" - evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) - """Evaluators to be used for the evaluation. Required.""" - trigger: "_models.Trigger" = rest_field() - """Trigger for the evaluation. Required.""" - sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") - """Sampling strategy for the evaluation. Required.""" - - @overload - def __init__( - self, - *, - data: "_models.ApplicationInsightsConfiguration", - evaluators: Dict[str, "_models.EvaluatorConfiguration"], - trigger: "_models.Trigger", - sampling_strategy: "_models.SamplingStrategy", - description: Optional[str] = None, - tags: Optional[Dict[str, str]] = None, - properties: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class EvaluatorConfiguration(_model_base.Model): - """Evaluator Configuration. - - - :ivar id: Identifier of the evaluator. Required. - :vartype id: str - :ivar init_params: Initialization parameters of the evaluator. - :vartype init_params: dict[str, any] - :ivar data_mapping: Data parameters of the evaluator. - :vartype data_mapping: dict[str, str] - """ - - id: str = rest_field() - """Identifier of the evaluator. Required.""" - init_params: Optional[Dict[str, Any]] = rest_field(name="initParams") - """Initialization parameters of the evaluator.""" - data_mapping: Optional[Dict[str, str]] = rest_field(name="dataMapping") - """Data parameters of the evaluator.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - init_params: Optional[Dict[str, Any]] = None, - data_mapping: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class FileContentResponse(_model_base.Model): - """A response from a file get content operation. - - - :ivar content: The content of the file, in bytes. Required. - :vartype content: bytes - """ - - content: bytes = rest_field(format="base64") - """The content of the file, in bytes. Required.""" - - @overload - def __init__( - self, - *, - content: bytes, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class FileDeletionStatus(_model_base.Model): - """A status response from a file deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'file'. Required. Default value is "file". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["file"] = rest_field() - """The object type, which is always 'file'. Required. Default value is \"file\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["file"] = "file" - - -class FileListResponse(_model_base.Model): - """The response data from a file list operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always 'list'. Required. Default value is "list". - :vartype object: str - :ivar data: The files returned for the request. Required. - :vartype data: list[~azure.ai.project.models.OpenAIFile] - """ - - object: Literal["list"] = rest_field() - """The object type, which is always 'list'. Required. Default value is \"list\".""" - data: List["_models.OpenAIFile"] = rest_field() - """The files returned for the request. Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.OpenAIFile"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): - """The input definition information for a file search tool as used to configure an agent. - - - :ivar type: The object type, which is always 'file_search'. Required. Default value is - "file_search". - :vartype type: str - :ivar file_search: Options overrides for the file search tool. - :vartype file_search: ~azure.ai.project.models.FileSearchToolDefinitionDetails - """ - - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" - file_search: Optional["_models.FileSearchToolDefinitionDetails"] = rest_field() - """Options overrides for the file search tool.""" - - @overload - def __init__( - self, - *, - file_search: Optional["_models.FileSearchToolDefinitionDetails"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, type="file_search", **kwargs)
-
-
- class FileSearchToolDefinitionDetails(_model_base.Model):
- """Options overrides for the file search tool.
-
- :ivar max_num_results: The maximum number of results the file search tool should output. The
- default is 20 for gpt-4* models and 5 for gpt-3.5-turbo. This number should be between 1 and 50
- inclusive.
-
- Note that the file search tool may output fewer than ``max_num_results`` results. See the file
- search tool documentation for more information.
- :vartype max_num_results: int
- """
-
- max_num_results: Optional[int] = rest_field()
- """The maximum number of results the file search tool should output. The default is 20 for gpt-4*
- models and 5 for gpt-3.5-turbo. This number should be between 1 and 50 inclusive.
-
- Note that the file search tool may output fewer than ``max_num_results`` results. See the file
- search tool documentation for more information."""
-
- @overload
- def __init__(
- self,
- *,
- max_num_results: Optional[int] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class FileSearchToolResource(_model_base.Model):
- """A set of resources that are used by the ``file_search`` tool.
-
- :ivar vector_store_ids: The ID of the vector store attached to this agent. There can be a
- maximum of 1 vector
- store attached to the agent.
- :vartype vector_store_ids: list[str]
- """
-
- vector_store_ids: Optional[List[str]] = rest_field()
- """The ID of the vector store attached to this agent. There can be a maximum of 1 vector
- store attached to the agent."""
-
- @overload
- def __init__(
- self,
- *,
- vector_store_ids: Optional[List[str]] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class FunctionDefinition(_model_base.Model):
- """The input definition information for a function.
-
-
- :ivar name: The name of the function to be called. Required.
- :vartype name: str
- :ivar description: A description of what the function does, used by the model to choose when
- and how to call the function.
- :vartype description: str
- :ivar parameters: The parameters the function accepts, described as a JSON Schema object.
- Required.
- :vartype parameters: any
- """
-
- name: str = rest_field()
- """The name of the function to be called. Required."""
- description: Optional[str] = rest_field()
- """A description of what the function does, used by the model to choose when and how to call the
- function."""
- parameters: Any = rest_field()
- """The parameters the function accepts, described as a JSON Schema object. Required."""
-
- @overload
- def __init__(
- self,
- *,
- name: str,
- parameters: Any,
- description: Optional[str] = None,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class FunctionName(_model_base.Model):
- """The function name that will be used, if using the ``function`` tool.
-
-
- :ivar name: The name of the function to call. Required.
- :vartype name: str
- """
-
- name: str = rest_field()
- """The name of the function to call. Required."""
-
- @overload
- def __init__(
- self,
- *,
- name: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class FunctionToolDefinition(ToolDefinition, discriminator="function"):
- """The input definition information for a function tool as used to configure an agent.
-
-
- :ivar type: The object type, which is always 'function'. Required. Default value is "function".
- :vartype type: str
- :ivar function: The definition of the concrete function that the function tool should call.
- Required.
- :vartype function: ~azure.ai.project.models.FunctionDefinition
- """
-
- type: Literal["function"] = rest_discriminator(name="type") # type: ignore
- """The object type, which is always 'function'. Required. Default value is \"function\"."""
- function: "_models.FunctionDefinition" = rest_field()
- """The definition of the concrete function that the function tool should call. Required."""
-
- @overload
- def __init__(
- self,
- *,
- function: "_models.FunctionDefinition",
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, type="function", **kwargs)
-
-
- class IndexResource(_model_base.Model):
- """An Index resource.
-
-
- :ivar index_connection_id: An index connection id in an IndexResource attached to this agent.
- Required.
- :vartype index_connection_id: str
- :ivar index_name: The name of an index in an IndexResource attached to this agent. Required.
- :vartype index_name: str
- """
-
- index_connection_id: str = rest_field()
- """An index connection id in an IndexResource attached to this agent. Required."""
- index_name: str = rest_field()
- """The name of an index in an IndexResource attached to this agent. Required."""
-
- @overload
- def __init__(
- self,
- *,
- index_connection_id: str,
- index_name: str,
- ) -> None: ...
-
- @overload
- def __init__(self, mapping: Mapping[str, Any]) -> None:
- """
- :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any]
- """
-
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
-
- class MessageAttachment(_model_base.Model):
- """This describes to which tools a file has been attached.
-
-
- :ivar file_id: The ID of the file to attach to the message. Required.
- :vartype file_id: str
- :ivar tools: The tools to add to this file. Required.
- :vartype tools: list[~azure.ai.project.models.CodeInterpreterToolDefinition or
- ~azure.ai.project.models.FileSearchToolDefinition]
- """
-
- file_id: str = rest_field()
- """The ID of the file to attach to the message. Required."""
- tools: List["_types.MessageAttachmentToolDefinition"] = rest_field()
- """The tools to add to this file.
Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - tools: List["_types.MessageAttachmentToolDefinition"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageContent(_model_base.Model): - """An abstract representation of a single item of thread message content. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageImageFileContent, MessageTextContent - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDelta(_model_base.Model): - """Represents the typed 'delta' payload within a streaming message delta chunk. - - - :ivar role: The entity that produced the message. Required. Known values are: "user" and - "assistant". - :vartype role: str or ~azure.ai.project.models.MessageRole - :ivar content: The content of the message as an array of text and/or images. Required. - :vartype content: list[~azure.ai.project.models.MessageDeltaContent] - """ - - role: Union[str, "_models.MessageRole"] = rest_field() - """The entity that produced the message. Required. Known values are: \"user\" and \"assistant\".""" - content: List["_models.MessageDeltaContent"] = rest_field() - """The content of the message as an array of text and/or images. Required.""" - - @overload - def __init__( - self, - *, - role: Union[str, "_models.MessageRole"], - content: List["_models.MessageDeltaContent"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaChunk(_model_base.Model): - """Represents a message delta i.e. any changed fields on a message during streaming. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier of the message, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``thread.message.delta``. Required. Default - value is "thread.message.delta". - :vartype object: str - :ivar delta: The delta containing the fields that have changed on the Message. Required. - :vartype delta: ~azure.ai.project.models.MessageDelta - """ - - id: str = rest_field() - """The identifier of the message, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message.delta"] = rest_field() - """The object type, which is always ``thread.message.delta``. Required. Default value is - \"thread.message.delta\".""" - delta: "_models.MessageDelta" = rest_field() - """The delta containing the fields that have changed on the Message. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - delta: "_models.MessageDelta", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.message.delta"] = "thread.message.delta" - - -class MessageDeltaContent(_model_base.Model): - """The abstract base representation of a partial streamed message content payload. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageDeltaImageFileContent, MessageDeltaTextContent - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the content part of the message. Required.""" - type: str = rest_discriminator(name="type") - """The type of content for this content part. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaImageFileContent(MessageDeltaContent, discriminator="image_file"): - """Represents a streamed image file content part within a streaming message delta chunk. - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part, which is always "image_file.". Required. - Default value is "image_file". - :vartype type: str - :ivar image_file: The image_file data. - :vartype image_file: ~azure.ai.project.models.MessageDeltaImageFileContentObject - """ - - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore - """The type of content for this content part, which is always \"image_file.\". Required. Default - value is \"image_file\".""" - image_file: Optional["_models.MessageDeltaImageFileContentObject"] = rest_field() - """The image_file data.""" - - @overload - def __init__( - self, - *, - index: int, - image_file: Optional["_models.MessageDeltaImageFileContentObject"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="image_file", **kwargs) - - -class MessageDeltaImageFileContentObject(_model_base.Model): - """Represents the 'image_file' payload within streaming image file content. - - :ivar file_id: The file ID of the image in the message content. - :vartype file_id: str - """ - - file_id: Optional[str] = rest_field() - """The file ID of the image in the message content.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaTextAnnotation(_model_base.Model): - """The abstract base representation of a streamed text content part's text annotation. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageDeltaTextFileCitationAnnotation, MessageDeltaTextFilePathAnnotation - - - :ivar index: The index of the annotation within a text content part. Required. - :vartype index: int - :ivar type: The type of the text content annotation. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the annotation within a text content part. Required.""" - type: str = rest_discriminator(name="type") - """The type of the text content annotation. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaTextContent(MessageDeltaContent, discriminator="text"): - """Represents a streamed text content part within a streaming message delta chunk. - - - :ivar index: The index of the content part of the message. Required. - :vartype index: int - :ivar type: The type of content for this content part, which is always "text.". Required. - Default value is "text". - :vartype type: str - :ivar text: The text content details. - :vartype text: ~azure.ai.project.models.MessageDeltaTextContentObject - """ - - type: Literal["text"] = rest_discriminator(name="type") # type: ignore - """The type of content for this content part, which is always \"text.\". Required. Default value - is \"text\".""" - text: Optional["_models.MessageDeltaTextContentObject"] = rest_field() - """The text content details.""" - - @overload - def __init__( - self, - *, - index: int, - text: Optional["_models.MessageDeltaTextContentObject"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="text", **kwargs) - - -class MessageDeltaTextContentObject(_model_base.Model): - """Represents the data of a streamed text content part within a streaming message delta chunk. - - :ivar value: The data that makes up the text. - :vartype value: str - :ivar annotations: Annotations for the text. - :vartype annotations: list[~azure.ai.project.models.MessageDeltaTextAnnotation] - """ - - value: Optional[str] = rest_field() - """The data that makes up the text.""" - annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = rest_field() - """Annotations for the text.""" - - @overload - def __init__( - self, - *, - value: Optional[str] = None, - annotations: Optional[List["_models.MessageDeltaTextAnnotation"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaTextFileCitationAnnotation(MessageDeltaTextAnnotation, discriminator="file_citation"): - """Represents a streamed file citation applied to a streaming text content part. - - - :ivar index: The index of the annotation within a text content part. Required. - :vartype index: int - :ivar type: The type of the text content annotation, which is always "file_citation.". - Required. Default value is "file_citation". - :vartype type: str - :ivar file_citation: The file citation information. - :vartype file_citation: ~azure.ai.project.models.MessageDeltaTextFileCitationAnnotationObject - :ivar text: The text in the message content that needs to be replaced. - :vartype text: str - :ivar start_index: The start index of this annotation in the content text. - :vartype start_index: int - :ivar end_index: The end index of this annotation in the content text. - :vartype end_index: int - """ - - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore - """The type of the text content annotation, which is always \"file_citation.\". Required. Default - value is \"file_citation\".""" - file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = rest_field() - """The file citation information.""" - text: Optional[str] = rest_field() - """The text in the message content that needs to be replaced.""" - start_index: Optional[int] = rest_field() - """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() - """The end index of this annotation in the content text.""" - - @overload - def __init__( - self, - *, - index: int, - file_citation: Optional["_models.MessageDeltaTextFileCitationAnnotationObject"] = None, - text: Optional[str] = None, - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_citation", **kwargs) - - -class MessageDeltaTextFileCitationAnnotationObject(_model_base.Model): # pylint: disable=name-too-long - """Represents the data of a streamed file citation as applied to a streaming text content part. - - :ivar file_id: The ID of the specific file the citation is from. - :vartype file_id: str - :ivar quote: The specific quote in the cited file. - :vartype quote: str - """ - - file_id: Optional[str] = rest_field() - """The ID of the specific file the citation is from.""" - quote: Optional[str] = rest_field() - """The specific quote in the cited file.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - quote: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageDeltaTextFilePathAnnotation(MessageDeltaTextAnnotation, discriminator="file_path"): - """Represents a streamed file path annotation applied to a streaming text content part. - - - :ivar index: The index of the annotation within a text content part. Required. 
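# Illustrative sketch: rebuilding full message text from streamed fragments. Each
# MessageDeltaTextContent carries an ``index`` and an optional fragment in ``text.value``;
# concatenating fragments per content-part index yields the complete text. Import path
# per the docstring references; assumed importable.
from typing import Dict, Iterable
from azure.ai.project.models import MessageDeltaTextContent

def merge_text_fragments(parts: Iterable[MessageDeltaTextContent]) -> Dict[int, str]:
    merged: Dict[int, str] = {}
    for part in parts:
        if part.text is not None and part.text.value is not None:
            merged[part.index] = merged.get(part.index, "") + part.text.value
    return merged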
- :vartype index: int - :ivar type: The type of the text content annotation, which is always "file_path.". Required. - Default value is "file_path". - :vartype type: str - :ivar file_path: The file path information. - :vartype file_path: ~azure.ai.project.models.MessageDeltaTextFilePathAnnotationObject - :ivar start_index: The start index of this annotation in the content text. - :vartype start_index: int - :ivar end_index: The end index of this annotation in the content text. - :vartype end_index: int - :ivar text: The text in the message content that needs to be replaced. - :vartype text: str - """ - - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore - """The type of the text content annotation, which is always \"file_path.\". Required. Default - value is \"file_path\".""" - file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = rest_field() - """The file path information.""" - start_index: Optional[int] = rest_field() - """The start index of this annotation in the content text.""" - end_index: Optional[int] = rest_field() - """The end index of this annotation in the content text.""" - text: Optional[str] = rest_field() - """The text in the message content that needs to be replaced.""" - - @overload - def __init__( - self, - *, - index: int, - file_path: Optional["_models.MessageDeltaTextFilePathAnnotationObject"] = None, - start_index: Optional[int] = None, - end_index: Optional[int] = None, - text: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_path", **kwargs) - - -class MessageDeltaTextFilePathAnnotationObject(_model_base.Model): - """Represents the data of a streamed file path annotation as applied to a streaming text content - part. - - :ivar file_id: The file ID for the annotation. - :vartype file_id: str - """ - - file_id: Optional[str] = rest_field() - """The file ID for the annotation.""" - - @overload - def __init__( - self, - *, - file_id: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageImageFileContent(MessageContent, discriminator="image_file"): - """A representation of image file content in a thread message. - - - :ivar type: The object type, which is always 'image_file'. Required. Default value is - "image_file". - :vartype type: str - :ivar image_file: The image file for this thread message content item. Required. - :vartype image_file: ~azure.ai.project.models.MessageImageFileDetails - """ - - type: Literal["image_file"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'image_file'. Required. Default value is \"image_file\".""" - image_file: "_models.MessageImageFileDetails" = rest_field() - """The image file for this thread message content item. Required.""" - - @overload - def __init__( - self, - *, - image_file: "_models.MessageImageFileDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="image_file", **kwargs) - - -class MessageImageFileDetails(_model_base.Model): - """An image reference, as represented in thread message content. - - - :ivar file_id: The ID for the file associated with this image. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID for the file associated with this image. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageIncompleteDetails(_model_base.Model): - """Information providing additional detail about a message entering an incomplete status. - - - :ivar reason: The provided reason describing why the message was marked as incomplete. - Required. Known values are: "content_filter", "max_tokens", "run_cancelled", "run_failed", and - "run_expired". - :vartype reason: str or ~azure.ai.project.models.MessageIncompleteDetailsReason - """ - - reason: Union[str, "_models.MessageIncompleteDetailsReason"] = rest_field() - """The provided reason describing why the message was marked as incomplete. Required. Known values - are: \"content_filter\", \"max_tokens\", \"run_cancelled\", \"run_failed\", and - \"run_expired\".""" - - @overload - def __init__( - self, - *, - reason: Union[str, "_models.MessageIncompleteDetailsReason"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageTextAnnotation(_model_base.Model): - """An abstract representation of an annotation to text thread message content. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - text: str = rest_field() - """The textual content associated with this text annotation item. Required.""" - - @overload - def __init__( - self, - *, - type: str, - text: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageTextContent(MessageContent, discriminator="text"): - """A representation of a textual item of thread message content. - - - :ivar type: The object type, which is always 'text'. Required. Default value is "text". - :vartype type: str - :ivar text: The text and associated annotations for this thread message content item. Required. 
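# Illustrative sketch: reacting to an incomplete message. ``reason`` is an extensible
# enum, so comparing against the known string values listed above keeps the check
# tolerant of values added later. The payload here is hypothetical.
from azure.ai.project.models import MessageIncompleteDetails

details = MessageIncompleteDetails({"reason": "max_tokens"})
if details.reason == "max_tokens":
    print("Message was truncated; consider a higher token limit for the run.")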
- :vartype text: ~azure.ai.project.models.MessageTextDetails - """ - - type: Literal["text"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'text'. Required. Default value is \"text\".""" - text: "_models.MessageTextDetails" = rest_field() - """The text and associated annotations for this thread message content item. Required.""" - - @overload - def __init__( - self, - *, - text: "_models.MessageTextDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="text", **kwargs) - - -class MessageTextDetails(_model_base.Model): - """The text and associated annotations for a single item of agent thread message content. - - - :ivar value: The text data. Required. - :vartype value: str - :ivar annotations: A list of annotations associated with this text. Required. - :vartype annotations: list[~azure.ai.project.models.MessageTextAnnotation] - """ - - value: str = rest_field() - """The text data. Required.""" - annotations: List["_models.MessageTextAnnotation"] = rest_field() - """A list of annotations associated with this text. Required.""" - - @overload - def __init__( - self, - *, - value: str, - annotations: List["_models.MessageTextAnnotation"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageTextFileCitationAnnotation(MessageTextAnnotation, discriminator="file_citation"): - """A citation within the message that points to a specific quote from a specific File associated - with the agent or the message. Generated when the agent uses the 'file_search' tool to search - files. - - - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - :ivar type: The object type, which is always 'file_citation'. Required. Default value is - "file_citation". - :vartype type: str - :ivar file_citation: A citation within the message that points to a specific quote from a - specific file. - Generated when the agent uses the "file_search" tool to search files. Required. - :vartype file_citation: ~azure.ai.project.models.MessageTextFileCitationDetails - :ivar start_index: The first text index associated with this text annotation. - :vartype start_index: int - :ivar end_index: The last text index associated with this text annotation. - :vartype end_index: int - """ - - type: Literal["file_citation"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_citation'. Required. Default value is \"file_citation\".""" - file_citation: "_models.MessageTextFileCitationDetails" = rest_field() - """A citation within the message that points to a specific quote from a specific file. - Generated when the agent uses the \"file_search\" tool to search files. 
Required.""" - start_index: Optional[int] = rest_field() - """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() - """The last text index associated with this text annotation.""" - - @overload - def __init__( - self, - *, - text: str, - file_citation: "_models.MessageTextFileCitationDetails", - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_citation", **kwargs) - - -class MessageTextFileCitationDetails(_model_base.Model): - """A representation of a file-based text citation, as used in a file-based annotation of text - thread message content. - - - :ivar file_id: The ID of the file associated with this citation. Required. - :vartype file_id: str - :ivar quote: The specific quote cited in the associated file. Required. - :vartype quote: str - """ - - file_id: str = rest_field() - """The ID of the file associated with this citation. Required.""" - quote: str = rest_field() - """The specific quote cited in the associated file. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - quote: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MessageTextFilePathAnnotation(MessageTextAnnotation, discriminator="file_path"): - """A citation within the message that points to a file located at a specific path. - - - :ivar text: The textual content associated with this text annotation item. Required. - :vartype text: str - :ivar type: The object type, which is always 'file_path'. Required. Default value is - "file_path". - :vartype type: str - :ivar file_path: A URL for the file that's generated when the agent used the code_interpreter - tool to generate a file. Required. - :vartype file_path: ~azure.ai.project.models.MessageTextFilePathDetails - :ivar start_index: The first text index associated with this text annotation. - :vartype start_index: int - :ivar end_index: The last text index associated with this text annotation. - :vartype end_index: int - """ - - type: Literal["file_path"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_path'. Required. Default value is \"file_path\".""" - file_path: "_models.MessageTextFilePathDetails" = rest_field() - """A URL for the file that's generated when the agent used the code_interpreter tool to generate a - file. Required.""" - start_index: Optional[int] = rest_field() - """The first text index associated with this text annotation.""" - end_index: Optional[int] = rest_field() - """The last text index associated with this text annotation.""" - - @overload - def __init__( - self, - *, - text: str, - file_path: "_models.MessageTextFilePathDetails", - start_index: Optional[int] = None, - end_index: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_path", **kwargs) - - -class MessageTextFilePathDetails(_model_base.Model): - """An encapsulation of a file ID, as used in a file path annotation of text thread message content. - - - :ivar file_id: The ID of the specific file that the citation is from. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID of the specific file that the citation is from. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"): - """The input definition information for a Microsoft Fabric tool as used to configure an agent. - - - :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is - "microsoft_fabric". - :vartype type: str - """ - - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="microsoft_fabric", **kwargs) - - -class OpenAIFile(_model_base.Model): - """Represents a file uploaded to the service. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always 'file'. Required. Default value is "file". - :vartype object: str - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar bytes: The size of the file, in bytes. Required. - :vartype bytes: int - :ivar filename: The name of the file. Required. - :vartype filename: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar purpose: The intended purpose of a file. Required. Known values are: "fine-tune", - "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". - :vartype purpose: str or ~azure.ai.project.models.FilePurpose - :ivar status: The state of the file. This field is available in Azure OpenAI only. Known values - are: "uploaded", "pending", "running", "processed", "error", "deleting", and "deleted". - :vartype status: str or ~azure.ai.project.models.FileState - :ivar status_details: The error message with details in case processing of this file failed. - This field is available in Azure OpenAI only. - :vartype status_details: str - """ - - object: Literal["file"] = rest_field() - """The object type, which is always 'file'. Required. Default value is \"file\".""" - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - bytes: int = rest_field() - """The size of the file, in bytes. Required.""" - filename: str = rest_field() - """The name of the file.
Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - purpose: Union[str, "_models.FilePurpose"] = rest_field() - """The intended purpose of a file. Required. Known values are: \"fine-tune\", - \"fine-tune-results\", \"assistants\", \"assistants_output\", \"batch\", \"batch_output\", and - \"vision\".""" - status: Optional[Union[str, "_models.FileState"]] = rest_field() - """The state of the file. This field is available in Azure OpenAI only. Known values are: - \"uploaded\", \"pending\", \"running\", \"processed\", \"error\", \"deleting\", and - \"deleted\".""" - status_details: Optional[str] = rest_field() - """The error message with details in case processing of this file failed. This field is available - in Azure OpenAI only.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - bytes: int, - filename: str, - created_at: datetime.datetime, - purpose: Union[str, "_models.FilePurpose"], - status: Optional[Union[str, "_models.FileState"]] = None, - status_details: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["file"] = "file" - - -class OpenAIPageableListOfAgent(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.Agent] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.Agent"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.Agent"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfRunStep(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". 
- :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.RunStep] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.RunStep"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.RunStep"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfThreadMessage(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.ThreadMessage] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadMessage"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.ThreadMessage"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfThreadRun(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". 
- :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.ThreadRun] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.ThreadRun"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.ThreadRun"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfVectorStore(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.VectorStore] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStore"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.VectorStore"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class OpenAIPageableListOfVectorStoreFile(_model_base.Model): - """The response data for a requested list of items. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar object: The object type, which is always list. Required. 
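# Illustrative sketch: draining a pageable list. All OpenAIPageableListOf* models share
# data/first_id/last_id/has_more, so one generic helper covers them. ``fetch_page`` is a
# hypothetical callable wrapping a list operation that accepts an ``after`` cursor; the
# cursor parameter itself is an assumption, not shown in this file.
from typing import Callable, Iterator, Optional

def iter_all_items(
    fetch_page: Callable[[Optional[str]], "OpenAIPageableListOfThreadRun"],
) -> Iterator["ThreadRun"]:
    cursor: Optional[str] = None
    while True:
        page = fetch_page(cursor)
        yield from page.data
        if not page.has_more:
            break
        cursor = page.last_id  # resume after the last ID seen on this page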
Default value is "list". - :vartype object: str - :ivar data: The requested list of items. Required. - :vartype data: list[~azure.ai.project.models.VectorStoreFile] - :ivar first_id: The first ID represented in this list. Required. - :vartype first_id: str - :ivar last_id: The last ID represented in this list. Required. - :vartype last_id: str - :ivar has_more: A value indicating whether there are additional values available not captured - in this list. Required. - :vartype has_more: bool - """ - - object: Literal["list"] = rest_field() - """The object type, which is always list. Required. Default value is \"list\".""" - data: List["_models.VectorStoreFile"] = rest_field() - """The requested list of items. Required.""" - first_id: str = rest_field() - """The first ID represented in this list. Required.""" - last_id: str = rest_field() - """The last ID represented in this list. Required.""" - has_more: bool = rest_field() - """A value indicating whether there are additional values available not captured in this list. - Required.""" - - @overload - def __init__( - self, - *, - data: List["_models.VectorStoreFile"], - first_id: str, - last_id: str, - has_more: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["list"] = "list" - - -class RecurrenceSchedule(_model_base.Model): - """RecurrenceSchedule Definition. - - - :ivar hours: List of hours for the schedule. Required. - :vartype hours: list[int] - :ivar minutes: List of minutes for the schedule. Required. - :vartype minutes: list[int] - :ivar week_days: List of days for the schedule. - :vartype week_days: list[str or ~azure.ai.project.models.WeekDays] - :ivar month_days: List of month days for the schedule. - :vartype month_days: list[int] - """ - - hours: List[int] = rest_field() - """List of hours for the schedule. Required.""" - minutes: List[int] = rest_field() - """List of minutes for the schedule. Required.""" - week_days: Optional[List[Union[str, "_models.WeekDays"]]] = rest_field(name="weekDays") - """List of days for the schedule.""" - month_days: Optional[List[int]] = rest_field(name="monthDays") - """List of month days for the schedule.""" - - @overload - def __init__( - self, - *, - hours: List[int], - minutes: List[int], - week_days: Optional[List[Union[str, "_models.WeekDays"]]] = None, - month_days: Optional[List[int]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RecurrenceTrigger(Trigger, discriminator="Recurrence"): - """Recurrence Trigger Definition. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar type: Required. Default value is "Recurrence". - :vartype type: str - :ivar frequency: The frequency to trigger schedule. Required. Known values are: "Month", - "Week", "Day", "Hour", and "Minute". - :vartype frequency: str or ~azure.ai.project.models.Frequency - :ivar interval: Specifies schedule interval in conjunction with frequency. Required. - :vartype interval: int - :ivar schedule: The recurrence schedule. 
- :vartype schedule: ~azure.ai.project.models.RecurrenceSchedule - """ - - type: Literal["Recurrence"] = rest_discriminator(name="type", visibility=["read"]) # type: ignore - """Required. Default value is \"Recurrence\".""" - frequency: Union[str, "_models.Frequency"] = rest_field() - """The frequency to trigger schedule. Required. Known values are: \"Month\", \"Week\", \"Day\", - \"Hour\", and \"Minute\".""" - interval: int = rest_field() - """Specifies schedule interval in conjunction with frequency. Required.""" - schedule: Optional["_models.RecurrenceSchedule"] = rest_field() - """The recurrence schedule.""" - - @overload - def __init__( - self, - *, - frequency: Union[str, "_models.Frequency"], - interval: int, - schedule: Optional["_models.RecurrenceSchedule"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="Recurrence", **kwargs) - - -class RequiredAction(_model_base.Model): - """An abstract representation of a required action for an agent thread run to continue. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - SubmitToolOutputsAction - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RequiredToolCall(_model_base.Model): - """An abstract representation of a tool invocation needed by the model to continue a run. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RequiredFunctionToolCall - - - :ivar type: The object type for the required tool call. Required. Default value is None. - :vartype type: str - :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs. - Required. - :vartype id: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type for the required tool call. Required. Default value is None.""" - id: str = rest_field() - """The ID of the tool call. This ID must be referenced when submitting tool outputs. Required.""" - - @overload - def __init__( - self, - *, - type: str, - id: str, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RequiredFunctionToolCall(RequiredToolCall, discriminator="function"): - """A representation of a requested call to a function tool, needed by the model to continue - evaluation of a run. - - - :ivar id: The ID of the tool call. This ID must be referenced when submitting tool outputs. - Required. - :vartype id: str - :ivar type: The object type of the required tool call.
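# Illustrative sketch: a trigger that fires at 09:30 on Mondays and Wednesdays, every
# week. "Week" comes from the known Frequency values listed above; the exact WeekDays
# spellings live in _enums.py, so the plain strings here are an assumption.
from azure.ai.project.models import RecurrenceSchedule, RecurrenceTrigger

trigger = RecurrenceTrigger(
    frequency="Week",
    interval=1,  # every 1 week
    schedule=RecurrenceSchedule(
        hours=[9],
        minutes=[30],
        week_days=["Monday", "Wednesday"],  # assumed enum spellings
    ),
)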
Always 'function' for function tools. - Required. Default value is "function". - :vartype type: str - :ivar function: Detailed information about the function to be executed by the tool that - includes name and arguments. Required. - :vartype function: ~azure.ai.project.models.RequiredFunctionToolCallDetails - """ - - type: Literal["function"] = rest_discriminator(name="type") # type: ignore - """The object type of the required tool call. Always 'function' for function tools. Required. - Default value is \"function\".""" - function: "_models.RequiredFunctionToolCallDetails" = rest_field() - """Detailed information about the function to be executed by the tool that includes name and - arguments. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - function: "_models.RequiredFunctionToolCallDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="function", **kwargs) - - -class RequiredFunctionToolCallDetails(_model_base.Model): - """The detailed information for a function invocation, as provided by a required action invoking a - function tool, that includes the name of and arguments to the function. - - - :ivar name: The name of the function. Required. - :vartype name: str - :ivar arguments: The arguments to use when invoking the named function, as provided by the - model. Arguments are presented as a JSON document that should be validated and parsed for - evaluation. Required. - :vartype arguments: str - """ - - name: str = rest_field() - """The name of the function. Required.""" - arguments: str = rest_field() - """The arguments to use when invoking the named function, as provided by the model. Arguments are - presented as a JSON document that should be validated and parsed for evaluation. Required.""" - - @overload - def __init__( - self, - *, - name: str, - arguments: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunCompletionUsage(_model_base.Model): - """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). - - - :ivar completion_tokens: Number of completion tokens used over the course of the run. Required. - :vartype completion_tokens: int - :ivar prompt_tokens: Number of prompt tokens used over the course of the run. Required. - :vartype prompt_tokens: int - :ivar total_tokens: Total number of tokens used (prompt + completion). Required. - :vartype total_tokens: int - """ - - completion_tokens: int = rest_field() - """Number of completion tokens used over the course of the run. Required.""" - prompt_tokens: int = rest_field() - """Number of prompt tokens used over the course of the run. Required.""" - total_tokens: int = rest_field() - """Total number of tokens used (prompt + completion). Required.""" - - @overload - def __init__( - self, - *, - completion_tokens: int, - prompt_tokens: int, - total_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
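# Illustrative sketch: acting on a required function tool call. As the docstring above
# notes, ``arguments`` arrives as a JSON document that should be validated and parsed
# before use, and the call's ``id`` must be echoed back when submitting tool outputs.
import json
from azure.ai.project.models import RequiredFunctionToolCall

def parse_tool_call(tool_call: RequiredFunctionToolCall) -> dict:
    arguments = json.loads(tool_call.function.arguments)  # may raise json.JSONDecodeError
    return {
        "tool_call_id": tool_call.id,
        "name": tool_call.function.name,
        "arguments": arguments,
    }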
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunError(_model_base.Model): - """The details of an error as encountered by an agent thread run. - - - :ivar code: The status for the error. Required. - :vartype code: str - :ivar message: The human-readable text associated with the error. Required. - :vartype message: str - """ - - code: str = rest_field() - """The status for the error. Required.""" - message: str = rest_field() - """The human-readable text associated with the error. Required.""" - - @overload - def __init__( - self, - *, - code: str, - message: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStep(_model_base.Model): - """Detailed information about a single step of an agent thread run. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.run.step'. Required. Default value is - "thread.run.step". - :vartype object: str - :ivar type: The type of run step, which can be either message_creation or tool_calls. Required. - Known values are: "message_creation" and "tool_calls". - :vartype type: str or ~azure.ai.project.models.RunStepType - :ivar assistant_id: The ID of the agent associated with the run step. Required. - :vartype assistant_id: str - :ivar thread_id: The ID of the thread that was run. Required. - :vartype thread_id: str - :ivar run_id: The ID of the run that this run step is a part of. Required. - :vartype run_id: str - :ivar status: The status of this run step. Required. Known values are: "in_progress", - "cancelled", "failed", "completed", and "expired". - :vartype status: str or ~azure.ai.project.models.RunStepStatus - :ivar step_details: The details for this run step. Required. - :vartype step_details: ~azure.ai.project.models.RunStepDetails - :ivar last_error: If applicable, information about the last error encountered by this run step. - Required. - :vartype last_error: ~azure.ai.project.models.RunStepError - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar expired_at: The Unix timestamp, in seconds, representing when this item expired. - Required. - :vartype expired_at: ~datetime.datetime - :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. - :vartype completed_at: ~datetime.datetime - :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. - Required. - :vartype cancelled_at: ~datetime.datetime - :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. - :vartype failed_at: ~datetime.datetime - :ivar usage: Usage statistics related to the run step. This value will be ``null`` while the - run step's status is ``in_progress``. - :vartype usage: ~azure.ai.project.models.RunStepCompletionUsage - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. 
Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run.step"] = rest_field() - """The object type, which is always 'thread.run.step'. Required. Default value is - \"thread.run.step\".""" - type: Union[str, "_models.RunStepType"] = rest_field() - """The type of run step, which can be either message_creation or tool_calls. Required. Known - values are: \"message_creation\" and \"tool_calls\".""" - assistant_id: str = rest_field() - """The ID of the agent associated with the run step. Required.""" - thread_id: str = rest_field() - """The ID of the thread that was run. Required.""" - run_id: str = rest_field() - """The ID of the run that this run step is a part of. Required.""" - status: Union[str, "_models.RunStepStatus"] = rest_field() - """The status of this run step. Required. Known values are: \"in_progress\", \"cancelled\", - \"failed\", \"completed\", and \"expired\".""" - step_details: "_models.RunStepDetails" = rest_field() - """The details for this run step. Required.""" - last_error: "_models.RunStepError" = rest_field() - """If applicable, information about the last error encountered by this run step. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - expired_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this item expired. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this completed. Required.""" - cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" - failed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this failed. Required.""" - usage: Optional["_models.RunStepCompletionUsage"] = rest_field() - """Usage statistics related to the run step. This value will be ``null`` while the run step's - status is ``in_progress``.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - type: Union[str, "_models.RunStepType"], - assistant_id: str, - thread_id: str, - run_id: str, - status: Union[str, "_models.RunStepStatus"], - step_details: "_models.RunStepDetails", - last_error: "_models.RunStepError", - created_at: datetime.datetime, - expired_at: datetime.datetime, - completed_at: datetime.datetime, - cancelled_at: datetime.datetime, - failed_at: datetime.datetime, - metadata: Dict[str, str], - usage: Optional["_models.RunStepCompletionUsage"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
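# Illustrative sketch: summarizing a finished run step. ``usage`` is None while the
# step's status is ``in_progress``, so guard before reading token counts.
from azure.ai.project.models import RunStep

def summarize_step(step: RunStep) -> str:
    line = f"{step.type} step {step.id}: {step.status}"
    if step.status == "failed" and step.last_error:
        line += f" ({step.last_error.code}: {step.last_error.message})"
    if step.usage is not None:
        line += f" [{step.usage.total_tokens} tokens]"
    return line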
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.run.step"] = "thread.run.step" - - -class RunStepToolCall(_model_base.Model): - """An abstract representation of a detailed tool call as recorded within a run step for an - existing run. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepAzureAISearchToolCall, RunStepBingGroundingToolCall, RunStepCodeInterpreterToolCall, - RunStepFileSearchToolCall, RunStepFunctionToolCall, RunStepMicrosoftFabricToolCall, - RunStepSharepointToolCall - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - id: str = rest_field() - """The ID of the tool call. This ID must be referenced when you submit tool outputs. Required.""" - - @overload - def __init__( - self, - *, - type: str, - id: str, # pylint: disable=redefined-builtin - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepAzureAISearchToolCall(RunStepToolCall, discriminator="azure_ai_search"): - """A record of a call to an Azure AI Search tool, issued by the model in evaluation of a defined - tool, that represents an executed Azure AI Search query. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'azure_ai_search'. Required. Default value is - "azure_ai_search". - :vartype type: str - :ivar azure_ai_search: Reserved for future use. Required. - :vartype azure_ai_search: dict[str, str] - """ - - type: Literal["azure_ai_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'azure_ai_search'. Required. Default value is - \"azure_ai_search\".""" - azure_ai_search: Dict[str, str] = rest_field() - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - azure_ai_search: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="azure_ai_search", **kwargs) - - -class RunStepBingGroundingToolCall(RunStepToolCall, discriminator="bing_grounding"): - """A record of a call to a Bing grounding tool, issued by the model in evaluation of a defined - tool, that represents an executed search with Bing grounding. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is - "bing_grounding". - :vartype type: str - :ivar bing_grounding: Reserved for future use. Required.
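# Illustrative sketch: routing a polymorphic run step tool call. Deserializing the
# RunStepToolCall base is expected to yield the subclass matching the "type"
# discriminator, so isinstance checks (or the raw ``type`` string) can route the record.
# Import path per the docstring references; assumed importable.
from azure.ai.project.models import RunStepAzureAISearchToolCall, RunStepToolCall

def route_tool_call(tool_call: RunStepToolCall) -> None:
    if isinstance(tool_call, RunStepAzureAISearchToolCall):
        print("azure_ai_search call", tool_call.id, tool_call.azure_ai_search)
    else:
        print("unhandled tool call type:", tool_call.type)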
- :vartype bing_grounding: dict[str, str] - """ - - type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'bing_grounding'. Required. Default value is - \"bing_grounding\".""" - bing_grounding: Dict[str, str] = rest_field() - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - bing_grounding: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="bing_grounding", **kwargs) - - -class RunStepCodeInterpreterToolCallOutput(_model_base.Model): - """An abstract representation of an emitted output from a code interpreter tool. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepCodeInterpreterImageOutput, RunStepCodeInterpreterLogOutput - - - :ivar type: The object type. Required. Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepCodeInterpreterImageOutput(RunStepCodeInterpreterToolCallOutput, discriminator="image"): - """A representation of an image output emitted by a code interpreter tool in response to a tool - call by the model. - - - :ivar type: The object type, which is always 'image'. Required. Default value is "image". - :vartype type: str - :ivar image: Referential information for the image associated with this output. Required. - :vartype image: ~azure.ai.project.models.RunStepCodeInterpreterImageReference - """ - - type: Literal["image"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'image'. Required. Default value is \"image\".""" - image: "_models.RunStepCodeInterpreterImageReference" = rest_field() - """Referential information for the image associated with this output. Required.""" - - @overload - def __init__( - self, - *, - image: "_models.RunStepCodeInterpreterImageReference", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="image", **kwargs) - - -class RunStepCodeInterpreterImageReference(_model_base.Model): - """An image reference emitted by a code interpreter tool in response to a tool call by the model. - - - :ivar file_id: The ID of the file associated with this image. Required. - :vartype file_id: str - """ - - file_id: str = rest_field() - """The ID of the file associated with this image. Required.""" - - @overload - def __init__( - self, - *, - file_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepCodeInterpreterLogOutput(RunStepCodeInterpreterToolCallOutput, discriminator="logs"): - """A representation of a log output emitted by a code interpreter tool in response to a tool call - by the model. - - - :ivar type: The object type, which is always 'logs'. Required. Default value is "logs". - :vartype type: str - :ivar logs: The serialized log output emitted by the code interpreter. Required. - :vartype logs: str - """ - - type: Literal["logs"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'logs'. Required. Default value is \"logs\".""" - logs: str = rest_field() - """The serialized log output emitted by the code interpreter. Required.""" - - @overload - def __init__( - self, - *, - logs: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="logs", **kwargs) - - -class RunStepCodeInterpreterToolCall(RunStepToolCall, discriminator="code_interpreter"): - """A record of a call to a code interpreter tool, issued by the model in evaluation of a defined - tool, that - represents inputs and outputs consumed and emitted by the code interpreter. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'code_interpreter'. Required. Default value is - "code_interpreter". - :vartype type: str - :ivar code_interpreter: The details of the tool call to the code interpreter tool. Required. - :vartype code_interpreter: ~azure.ai.project.models.RunStepCodeInterpreterToolCallDetails - """ - - type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'code_interpreter'. Required. Default value is - \"code_interpreter\".""" - code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails" = rest_field() - """The details of the tool call to the code interpreter tool. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - code_interpreter: "_models.RunStepCodeInterpreterToolCallDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="code_interpreter", **kwargs) - - -class RunStepCodeInterpreterToolCallDetails(_model_base.Model): - """The detailed information about a code interpreter invocation by the model. - - - :ivar input: The input provided by the model to the code interpreter tool. Required. - :vartype input: str - :ivar outputs: The outputs produced by the code interpreter tool back to the model in response - to the tool call. Required. - :vartype outputs: list[~azure.ai.project.models.RunStepCodeInterpreterToolCallOutput] - """ - - input: str = rest_field() - """The input provided by the model to the code interpreter tool. Required.""" - outputs: List["_models.RunStepCodeInterpreterToolCallOutput"] = rest_field() - """The outputs produced by the code interpreter tool back to the model in response to the tool - call. 
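# Illustrative sketch: separating Code Interpreter outputs recorded on a run step. Log
# outputs carry serialized text directly; image outputs carry a file ID that would be
# downloaded through the files API. Import path per the docstring references.
from azure.ai.project.models import (
    RunStepCodeInterpreterImageOutput,
    RunStepCodeInterpreterLogOutput,
    RunStepCodeInterpreterToolCall,
)

def collect_outputs(call: RunStepCodeInterpreterToolCall) -> None:
    print("input:", call.code_interpreter.input)
    for output in call.code_interpreter.outputs:
        if isinstance(output, RunStepCodeInterpreterLogOutput):
            print("logs:", output.logs)
        elif isinstance(output, RunStepCodeInterpreterImageOutput):
            print("image file:", output.image.file_id)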
Required.""" - - @overload - def __init__( - self, - *, - input: str, - outputs: List["_models.RunStepCodeInterpreterToolCallOutput"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepCompletionUsage(_model_base.Model): - """Usage statistics related to the run step. - - - :ivar completion_tokens: Number of completion tokens used over the course of the run step. - Required. - :vartype completion_tokens: int - :ivar prompt_tokens: Number of prompt tokens used over the course of the run step. Required. - :vartype prompt_tokens: int - :ivar total_tokens: Total number of tokens used (prompt + completion). Required. - :vartype total_tokens: int - """ - - completion_tokens: int = rest_field() - """Number of completion tokens used over the course of the run step. Required.""" - prompt_tokens: int = rest_field() - """Number of prompt tokens used over the course of the run step. Required.""" - total_tokens: int = rest_field() - """Total number of tokens used (prompt + completion). Required.""" - - @overload - def __init__( - self, - *, - completion_tokens: int, - prompt_tokens: int, - total_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDelta(_model_base.Model): - """Represents the delta payload in a streaming run step delta chunk. - - :ivar step_details: The details of the run step. - :vartype step_details: ~azure.ai.project.models.RunStepDeltaDetail - """ - - step_details: Optional["_models.RunStepDeltaDetail"] = rest_field() - """The details of the run step.""" - - @overload - def __init__( - self, - *, - step_details: Optional["_models.RunStepDeltaDetail"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDeltaChunk(_model_base.Model): - """Represents a run step delta i.e. any changed fields on a run step during streaming. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier of the run step, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``thread.run.step.delta``. Required. Default - value is "thread.run.step.delta". - :vartype object: str - :ivar delta: The delta containing the fields that have changed on the run step. Required. - :vartype delta: ~azure.ai.project.models.RunStepDelta - """ - - id: str = rest_field() - """The identifier of the run step, which can be referenced in API endpoints. Required.""" - object: Literal["thread.run.step.delta"] = rest_field() - """The object type, which is always ``thread.run.step.delta``. Required. Default value is - \"thread.run.step.delta\".""" - delta: "_models.RunStepDelta" = rest_field() - """The delta containing the fields that have changed on the run step. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - delta: "_models.RunStepDelta", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.run.step.delta"] = "thread.run.step.delta" - - -class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: disable=name-too-long - """Represents the Code Interpreter tool call data in a streaming run step's tool calls. - - :ivar input: The input into the Code Interpreter tool call. - :vartype input: str - :ivar outputs: The outputs from the Code Interpreter tool call. Code Interpreter can output one - or more - items, including text (\\ ``logs``\\ ) or images (\\ ``image``\\ ). Each of these are - represented by a - different object type. - :vartype outputs: list[~azure.ai.project.models.RunStepDeltaCodeInterpreterOutput] - """ - - input: Optional[str] = rest_field() - """The input into the Code Interpreter tool call.""" - outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() - """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented - by a - different object type.""" - - @overload - def __init__( - self, - *, - input: Optional[str] = None, - outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDeltaCodeInterpreterOutput(_model_base.Model): - """The abstract base representation of a streaming run step tool call's Code Interpreter tool - output. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepDeltaCodeInterpreterImageOutput, RunStepDeltaCodeInterpreterLogOutput - - - :ivar index: The index of the output in the streaming run step tool call's Code Interpreter - outputs array. Required. - :vartype index: int - :ivar type: The type of the streaming run step tool call's Code Interpreter output. Required. - Default value is None. - :vartype type: str - """ - - __mapping__: Dict[str, _model_base.Model] = {} - index: int = rest_field() - """The index of the output in the streaming run step tool call's Code Interpreter outputs array. - Required.""" - type: str = rest_discriminator(name="type") - """The type of the streaming run step tool call's Code Interpreter output. Required. Default value - is None.""" - - @overload - def __init__( - self, - *, - index: int, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDeltaCodeInterpreterImageOutput(RunStepDeltaCodeInterpreterOutput, discriminator="image"): - """Represents an image output as produced the Code interpreter tool and as represented in a - streaming run step's delta tool calls collection. 
- 
- 
- :ivar index: The index of the output in the streaming run step tool call's Code Interpreter 
- outputs array. Required. 
- :vartype index: int 
- :ivar type: The object type, which is always "image". Required. Default value is "image". 
- :vartype type: str 
- :ivar image: The image data for the Code Interpreter tool call output. 
- :vartype image: ~azure.ai.project.models.RunStepDeltaCodeInterpreterImageOutputObject 
- """ 
- 
- type: Literal["image"] = rest_discriminator(name="type") # type: ignore 
- """The object type, which is always \"image\". Required. Default value is \"image\".""" 
- image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = rest_field() 
- """The image data for the Code Interpreter tool call output.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- index: int, 
- image: Optional["_models.RunStepDeltaCodeInterpreterImageOutputObject"] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, type="image", **kwargs) 
- 
- 
-class RunStepDeltaCodeInterpreterImageOutputObject(_model_base.Model): # pylint: disable=name-too-long 
- """Represents the data for a streaming run step's Code Interpreter tool call image output. 
- 
- :ivar file_id: The file ID for the image. 
- :vartype file_id: str 
- """ 
- 
- file_id: Optional[str] = rest_field() 
- """The file ID for the image.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- file_id: Optional[str] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, **kwargs) 
- 
- 
-class RunStepDeltaCodeInterpreterLogOutput(RunStepDeltaCodeInterpreterOutput, discriminator="logs"): 
- """Represents a log output as produced by the Code Interpreter tool and as represented in a 
- streaming run step's delta tool calls collection. 
- 
- 
- :ivar index: The index of the output in the streaming run step tool call's Code Interpreter 
- outputs array. Required. 
- :vartype index: int 
- :ivar type: The type of the object, which is always "logs". Required. Default value is "logs". 
- :vartype type: str 
- :ivar logs: The text output from the Code Interpreter tool call. 
- :vartype logs: str 
- """ 
- 
- type: Literal["logs"] = rest_discriminator(name="type") # type: ignore 
- """The type of the object, which is always \"logs\". Required. Default value is \"logs\".""" 
- logs: Optional[str] = rest_field() 
- """The text output from the Code Interpreter tool call.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- index: int, 
- logs: Optional[str] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, type="logs", **kwargs) 
- 
- 
-class RunStepDeltaToolCall(_model_base.Model): 
- """The abstract base representation of a single tool call within a streaming run step's delta tool 
- call details. 
- 
- You probably want to use the sub-classes and not this class directly.
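# Illustrative sketch (editorial example, not part of the generated code):
# streamed Code Interpreter outputs carry a "type" discriminator, so a
# consumer can branch on the concrete subclasses defined above.
def _handle_code_interpreter_delta(output: "RunStepDeltaCodeInterpreterOutput") -> None:
    if isinstance(output, RunStepDeltaCodeInterpreterLogOutput):
        print(output.logs or "", end="")  # incremental log text
    elif isinstance(output, RunStepDeltaCodeInterpreterImageOutput):
        if output.image and output.image.file_id:
            print(f"[image file: {output.image.file_id}]")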
Known sub-classes are: 
- RunStepDeltaCodeInterpreterToolCall, RunStepDeltaFileSearchToolCall, 
- RunStepDeltaFunctionToolCall 
- 
- 
- :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. 
- :vartype index: int 
- :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. 
- :vartype id: str 
- :ivar type: The type of the tool call detail item in a streaming run step's details. Required. 
- Default value is None. 
- :vartype type: str 
- """ 
- 
- __mapping__: Dict[str, _model_base.Model] = {} 
- index: int = rest_field() 
- """The index of the tool call detail in the run step's tool_calls array. Required.""" 
- id: str = rest_field() 
- """The ID of the tool call, used when submitting outputs to the run. Required.""" 
- type: str = rest_discriminator(name="type") 
- """The type of the tool call detail item in a streaming run step's details. Required. Default 
- value is None.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- index: int, 
- id: str, # pylint: disable=redefined-builtin 
- type: str, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, **kwargs) 
- 
- 
-class RunStepDeltaCodeInterpreterToolCall(RunStepDeltaToolCall, discriminator="code_interpreter"): 
- """Represents a Code Interpreter tool call within a streaming run step's tool call details. 
- 
- 
- :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. 
- :vartype index: int 
- :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. 
- :vartype id: str 
- :ivar type: The object type, which is always "code_interpreter". Required. Default value is 
- "code_interpreter". 
- :vartype type: str 
- :ivar code_interpreter: The Code Interpreter data for the tool call. 
- :vartype code_interpreter: ~azure.ai.project.models.RunStepDeltaCodeInterpreterDetailItemObject 
- """ 
- 
- type: Literal["code_interpreter"] = rest_discriminator(name="type") # type: ignore 
- """The object type, which is always \"code_interpreter\". Required. Default value is 
- \"code_interpreter\".""" 
- code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = rest_field() 
- """The Code Interpreter data for the tool call.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- index: int, 
- id: str, # pylint: disable=redefined-builtin 
- code_interpreter: Optional["_models.RunStepDeltaCodeInterpreterDetailItemObject"] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, type="code_interpreter", **kwargs) 
- 
- 
-class RunStepDeltaDetail(_model_base.Model): 
- """Represents a single run step detail item in a streaming run step's delta payload. 
- 
- You probably want to use the sub-classes and not this class directly. Known sub-classes are: 
- RunStepDeltaMessageCreation, RunStepDeltaToolCallObject 
- 
- 
- :ivar type: The object type for the run step detail object. Required. Default value is None. 
- :vartype type: str 
- """ 
- 
- __mapping__: Dict[str, _model_base.Model] = {} 
- type: str = rest_discriminator(name="type") 
- """The object type for the run step detail object. Required.
Default value is None.""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDeltaFileSearchToolCall(RunStepDeltaToolCall, discriminator="file_search"): - """Represents a file search tool call within a streaming run step's tool call details. - - - :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. - :vartype index: int - :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. - :vartype id: str - :ivar type: The object type, which is always "file_search.". Required. Default value is - "file_search". - :vartype type: str - :ivar file_search: Reserved for future use. - :vartype file_search: dict[str, str] - """ - - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always \"file_search.\". Required. Default value is \"file_search\".""" - file_search: Optional[Dict[str, str]] = rest_field() - """Reserved for future use.""" - - @overload - def __init__( - self, - *, - index: int, - id: str, # pylint: disable=redefined-builtin - file_search: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_search", **kwargs) - - -class RunStepDeltaFunction(_model_base.Model): - """Represents the function data in a streaming run step delta's function tool call. - - :ivar name: The name of the function. - :vartype name: str - :ivar arguments: The arguments passed to the function as input. - :vartype arguments: str - :ivar output: The output of the function, null if outputs have not yet been submitted. - :vartype output: str - """ - - name: Optional[str] = rest_field() - """The name of the function.""" - arguments: Optional[str] = rest_field() - """The arguments passed to the function as input.""" - output: Optional[str] = rest_field() - """The output of the function, null if outputs have not yet been submitted.""" - - @overload - def __init__( - self, - *, - name: Optional[str] = None, - arguments: Optional[str] = None, - output: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepDeltaFunctionToolCall(RunStepDeltaToolCall, discriminator="function"): - """Represents a function tool call within a streaming run step's tool call details. - - - :ivar index: The index of the tool call detail in the run step's tool_calls array. Required. - :vartype index: int - :ivar id: The ID of the tool call, used when submitting outputs to the run. Required. - :vartype id: str - :ivar type: The object type, which is always "function.". Required. Default value is - "function". - :vartype type: str - :ivar function: The function data for the tool call. 
- :vartype function: ~azure.ai.project.models.RunStepDeltaFunction 
- """ 
- 
- type: Literal["function"] = rest_discriminator(name="type") # type: ignore 
- """The object type, which is always \"function\". Required. Default value is \"function\".""" 
- function: Optional["_models.RunStepDeltaFunction"] = rest_field() 
- """The function data for the tool call.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- index: int, 
- id: str, # pylint: disable=redefined-builtin 
- function: Optional["_models.RunStepDeltaFunction"] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, type="function", **kwargs) 
- 
- 
-class RunStepDeltaMessageCreation(RunStepDeltaDetail, discriminator="message_creation"): 
- """Represents a message creation within a streaming run step delta. 
- 
- 
- :ivar type: The object type, which is always "message_creation". Required. Default value is 
- "message_creation". 
- :vartype type: str 
- :ivar message_creation: The message creation data. 
- :vartype message_creation: ~azure.ai.project.models.RunStepDeltaMessageCreationObject 
- """ 
- 
- type: Literal["message_creation"] = rest_discriminator(name="type") # type: ignore 
- """The object type, which is always \"message_creation\". Required. Default value is 
- \"message_creation\".""" 
- message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = rest_field() 
- """The message creation data.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- message_creation: Optional["_models.RunStepDeltaMessageCreationObject"] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, type="message_creation", **kwargs) 
- 
- 
-class RunStepDeltaMessageCreationObject(_model_base.Model): 
- """Represents the data within a streaming run step message creation response object. 
- 
- :ivar message_id: The ID of the newly-created message. 
- :vartype message_id: str 
- """ 
- 
- message_id: Optional[str] = rest_field() 
- """The ID of the newly-created message.""" 
- 
- @overload 
- def __init__( 
- self, 
- *, 
- message_id: Optional[str] = None, 
- ) -> None: ... 
- 
- @overload 
- def __init__(self, mapping: Mapping[str, Any]) -> None: 
- """ 
- :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] 
- """ 
- 
- def __init__(self, *args: Any, **kwargs: Any) -> None: 
- super().__init__(*args, **kwargs) 
- 
- 
-class RunStepDeltaToolCallObject(RunStepDeltaDetail, discriminator="tool_calls"): 
- """Represents an invocation of tool calls as part of a streaming run step. 
- 
- 
- :ivar type: The object type, which is always "tool_calls". Required. Default value is 
- "tool_calls". 
- :vartype type: str 
- :ivar tool_calls: The collection of tool calls for the tool call detail item. 
- :vartype tool_calls: list[~azure.ai.project.models.RunStepDeltaToolCall] 
- """ 
- 
- type: Literal["tool_calls"] = rest_discriminator(name="type") # type: ignore 
- """The object type, which is always \"tool_calls\". Required.
Default value is \"tool_calls\".""" - tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = rest_field() - """The collection of tool calls for the tool call detail item.""" - - @overload - def __init__( - self, - *, - tool_calls: Optional[List["_models.RunStepDeltaToolCall"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="tool_calls", **kwargs) - - -class RunStepDetails(_model_base.Model): - """An abstract representation of the details for a run step. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - RunStepMessageCreationDetails, RunStepToolCallDetails - - - :ivar type: The object type. Required. Known values are: "message_creation" and "tool_calls". - :vartype type: str or ~azure.ai.project.models.RunStepType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Known values are: \"message_creation\" and \"tool_calls\".""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepError(_model_base.Model): - """The error information associated with a failed run step. - - - :ivar code: The error code for this error. Required. Known values are: "server_error" and - "rate_limit_exceeded". - :vartype code: str or ~azure.ai.project.models.RunStepErrorCode - :ivar message: The human-readable text associated with this error. Required. - :vartype message: str - """ - - code: Union[str, "_models.RunStepErrorCode"] = rest_field() - """The error code for this error. Required. Known values are: \"server_error\" and - \"rate_limit_exceeded\".""" - message: str = rest_field() - """The human-readable text associated with this error. Required.""" - - @overload - def __init__( - self, - *, - code: Union[str, "_models.RunStepErrorCode"], - message: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepFileSearchToolCall(RunStepToolCall, discriminator="file_search"): - """A record of a call to a file search tool, issued by the model in evaluation of a defined tool, - that represents - executed file search. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'file_search'. Required. Default value is - "file_search". - :vartype type: str - :ivar file_search: Reserved for future use. Required. - :vartype file_search: dict[str, str] - """ - - type: Literal["file_search"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'file_search'. Required. Default value is \"file_search\".""" - file_search: Dict[str, str] = rest_field() - """Reserved for future use. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - file_search: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="file_search", **kwargs) - - -class RunStepFunctionToolCall(RunStepToolCall, discriminator="function"): - """A record of a call to a function tool, issued by the model in evaluation of a defined tool, - that represents the inputs - and output consumed and emitted by the specified function. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'function'. Required. Default value is "function". - :vartype type: str - :ivar function: The detailed information about the function called by the model. Required. - :vartype function: ~azure.ai.project.models.RunStepFunctionToolCallDetails - """ - - type: Literal["function"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'function'. Required. Default value is \"function\".""" - function: "_models.RunStepFunctionToolCallDetails" = rest_field() - """The detailed information about the function called by the model. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - function: "_models.RunStepFunctionToolCallDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="function", **kwargs) - - -class RunStepFunctionToolCallDetails(_model_base.Model): - """The detailed information about the function called by the model. - - - :ivar name: The name of the function. Required. - :vartype name: str - :ivar arguments: The arguments that the model requires are provided to the named function. - Required. - :vartype arguments: str - :ivar output: The output of the function, only populated for function calls that have already - have had their outputs submitted. Required. - :vartype output: str - """ - - name: str = rest_field() - """The name of the function. Required.""" - arguments: str = rest_field() - """The arguments that the model requires are provided to the named function. Required.""" - output: str = rest_field() - """The output of the function, only populated for function calls that have already have had their - outputs submitted. Required.""" - - @overload - def __init__( - self, - *, - name: str, - arguments: str, - output: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepMessageCreationDetails(RunStepDetails, discriminator="message_creation"): - """The detailed information associated with a message creation run step. - - - :ivar type: The object type, which is always 'message_creation'. Required. Represents a run - step to create a message. 
- :vartype type: str or ~azure.ai.project.models.MESSAGE_CREATION - :ivar message_creation: Information about the message creation associated with this run step. - Required. - :vartype message_creation: ~azure.ai.project.models.RunStepMessageCreationReference - """ - - type: Literal[RunStepType.MESSAGE_CREATION] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'message_creation'. Required. Represents a run step to create - a message.""" - message_creation: "_models.RunStepMessageCreationReference" = rest_field() - """Information about the message creation associated with this run step. Required.""" - - @overload - def __init__( - self, - *, - message_creation: "_models.RunStepMessageCreationReference", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=RunStepType.MESSAGE_CREATION, **kwargs) - - -class RunStepMessageCreationReference(_model_base.Model): - """The details of a message created as a part of a run step. - - - :ivar message_id: The ID of the message created by this run step. Required. - :vartype message_id: str - """ - - message_id: str = rest_field() - """The ID of the message created by this run step. Required.""" - - @overload - def __init__( - self, - *, - message_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"): - """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined - tool, that represents - executed Microsoft Fabric operations. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is - "microsoft_fabric". - :vartype type: str - :ivar microsoft_fabric: Reserved for future use. Required. - :vartype microsoft_fabric: dict[str, str] - """ - - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - microsoft_fabric: Dict[str, str] = rest_field() - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - microsoft_fabric: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="microsoft_fabric", **kwargs) - - -class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): - """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, - that represents - executed SharePoint actions. - - - :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. - Required. - :vartype id: str - :ivar type: The object type, which is always 'sharepoint'. Required. 
Default value is - "sharepoint". - :vartype type: str - :ivar share_point: Reserved for future use. Required. - :vartype share_point: dict[str, str] - """ - - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" - share_point: Dict[str, str] = rest_field(name="sharepoint") - """Reserved for future use. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - share_point: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint", **kwargs) - - -class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): - """The detailed information associated with a run step calling tools. - - - :ivar type: The object type, which is always 'tool_calls'. Required. Represents a run step that - calls tools. - :vartype type: str or ~azure.ai.project.models.TOOL_CALLS - :ivar tool_calls: A list of tool call details for this run step. Required. - :vartype tool_calls: list[~azure.ai.project.models.RunStepToolCall] - """ - - type: Literal[RunStepType.TOOL_CALLS] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'tool_calls'. Required. Represents a run step that calls - tools.""" - tool_calls: List["_models.RunStepToolCall"] = rest_field() - """A list of tool call details for this run step. Required.""" - - @overload - def __init__( - self, - *, - tool_calls: List["_models.RunStepToolCall"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) - - -class SamplingStrategy(_model_base.Model): - """SamplingStrategy Definition. - - - :ivar rate: Sampling rate. Required. - :vartype rate: float - """ - - rate: float = rest_field() - """Sampling rate. Required.""" - - @overload - def __init__( - self, - *, - rate: float, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): - """The input definition information for a sharepoint tool as used to configure an agent. - - - :ivar type: The object type, which is always 'sharepoint'. Required. Default value is - "sharepoint". - :vartype type: str - """ - - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
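# Illustrative sketch (editorial example, not part of the generated code): a
# run step's step_details is one of the RunStepDetails subclasses above, so a
# consumer can dispatch on the concrete type.
def _describe_step_details(details: "RunStepDetails") -> str:
    if isinstance(details, RunStepMessageCreationDetails):
        return f"created message {details.message_creation.message_id}"
    if isinstance(details, RunStepToolCallDetails):
        return f"issued {len(details.tool_calls)} tool call(s)"
    return f"unrecognized step type: {details.type}"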
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint", **kwargs) - - -class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): - """The details for required tool calls that must be submitted for an agent thread run to continue. - - - :ivar type: The object type, which is always 'submit_tool_outputs'. Required. Default value is - "submit_tool_outputs". - :vartype type: str - :ivar submit_tool_outputs: The details describing tools that should be called to submit tool - outputs. Required. - :vartype submit_tool_outputs: ~azure.ai.project.models.SubmitToolOutputsDetails - """ - - type: Literal["submit_tool_outputs"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'submit_tool_outputs'. Required. Default value is - \"submit_tool_outputs\".""" - submit_tool_outputs: "_models.SubmitToolOutputsDetails" = rest_field() - """The details describing tools that should be called to submit tool outputs. Required.""" - - @overload - def __init__( - self, - *, - submit_tool_outputs: "_models.SubmitToolOutputsDetails", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="submit_tool_outputs", **kwargs) - - -class SubmitToolOutputsDetails(_model_base.Model): - """The details describing tools that should be called to submit tool outputs. - - - :ivar tool_calls: The list of tool calls that must be resolved for the agent thread run to - continue. Required. - :vartype tool_calls: list[~azure.ai.project.models.RequiredToolCall] - """ - - tool_calls: List["_models.RequiredToolCall"] = rest_field() - """The list of tool calls that must be resolved for the agent thread run to continue. Required.""" - - @overload - def __init__( - self, - *, - tool_calls: List["_models.RequiredToolCall"], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class SystemData(_model_base.Model): - """Metadata pertaining to creation and last modification of the resource. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - :ivar created_at: The timestamp the resource was created at. - :vartype created_at: ~datetime.datetime - :ivar created_by: The identity that created the resource. - :vartype created_by: str - :ivar created_by_type: The identity type that created the resource. - :vartype created_by_type: str - :ivar last_modified_at: The timestamp of resource last modification (UTC). 
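# Illustrative sketch (editorial example, not part of the generated code;
# RequiredToolCall is defined earlier in this module and exposes the call's
# `id`): list the tool calls that must be resolved before the run continues.
def _pending_tool_call_ids(action: "SubmitToolOutputsAction") -> List[str]:
    return [tool_call.id for tool_call in action.submit_tool_outputs.tool_calls]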
- :vartype last_modified_at: ~datetime.datetime - """ - - created_at: Optional[datetime.datetime] = rest_field(name="createdAt", visibility=["read"], format="rfc3339") - """The timestamp the resource was created at.""" - created_by: Optional[str] = rest_field(name="createdBy", visibility=["read"]) - """The identity that created the resource.""" - created_by_type: Optional[str] = rest_field(name="createdByType", visibility=["read"]) - """The identity type that created the resource.""" - last_modified_at: Optional[datetime.datetime] = rest_field( - name="lastModifiedAt", visibility=["read"], format="rfc3339" - ) - """The timestamp of resource last modification (UTC).""" - - -class ThreadDeletionStatus(_model_base.Model): - """The status of a thread deletion operation. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'thread.deleted'. Required. Default value is - "thread.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["thread.deleted"] = rest_field() - """The object type, which is always 'thread.deleted'. Required. Default value is - \"thread.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.deleted"] = "thread.deleted" - - -class ThreadMessage(_model_base.Model): - """A single, existing message within an agent thread. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.message'. Required. Default value is - "thread.message". - :vartype object: str - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar thread_id: The ID of the thread that this message belongs to. Required. - :vartype thread_id: str - :ivar status: The status of the message. Required. Known values are: "in_progress", - "incomplete", and "completed". - :vartype status: str or ~azure.ai.project.models.MessageStatus - :ivar incomplete_details: On an incomplete message, details about why the message is - incomplete. Required. - :vartype incomplete_details: ~azure.ai.project.models.MessageIncompleteDetails - :ivar completed_at: The Unix timestamp (in seconds) for when the message was completed. - Required. - :vartype completed_at: ~datetime.datetime - :ivar incomplete_at: The Unix timestamp (in seconds) for when the message was marked as - incomplete. Required. - :vartype incomplete_at: ~datetime.datetime - :ivar role: The role associated with the agent thread message. Required. Known values are: - "user" and "assistant". 
- :vartype role: str or ~azure.ai.project.models.MessageRole - :ivar content: The list of content items associated with the agent thread message. Required. - :vartype content: list[~azure.ai.project.models.MessageContent] - :ivar assistant_id: If applicable, the ID of the agent that authored this message. Required. - :vartype assistant_id: str - :ivar run_id: If applicable, the ID of the run associated with the authoring of this message. - Required. - :vartype run_id: str - :ivar attachments: A list of files attached to the message, and the tools they were added to. - Required. - :vartype attachments: list[~azure.ai.project.models.MessageAttachment] - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["thread.message"] = rest_field() - """The object type, which is always 'thread.message'. Required. Default value is - \"thread.message\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - thread_id: str = rest_field() - """The ID of the thread that this message belongs to. Required.""" - status: Union[str, "_models.MessageStatus"] = rest_field() - """The status of the message. Required. Known values are: \"in_progress\", \"incomplete\", and - \"completed\".""" - incomplete_details: "_models.MessageIncompleteDetails" = rest_field() - """On an incomplete message, details about why the message is incomplete. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the message was completed. Required.""" - incomplete_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the message was marked as incomplete. Required.""" - role: Union[str, "_models.MessageRole"] = rest_field() - """The role associated with the agent thread message. Required. Known values are: \"user\" and - \"assistant\".""" - content: List["_models.MessageContent"] = rest_field() - """The list of content items associated with the agent thread message. Required.""" - assistant_id: str = rest_field() - """If applicable, the ID of the agent that authored this message. Required.""" - run_id: str = rest_field() - """If applicable, the ID of the run associated with the authoring of this message. Required.""" - attachments: List["_models.MessageAttachment"] = rest_field() - """A list of files attached to the message, and the tools they were added to. Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. 
Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - thread_id: str, - status: Union[str, "_models.MessageStatus"], - incomplete_details: "_models.MessageIncompleteDetails", - completed_at: datetime.datetime, - incomplete_at: datetime.datetime, - role: Union[str, "_models.MessageRole"], - content: List["_models.MessageContent"], - assistant_id: str, - run_id: str, - attachments: List["_models.MessageAttachment"], - metadata: Dict[str, str], - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.message"] = "thread.message" - - -class ThreadMessageOptions(_model_base.Model): - """A single message within an agent thread, as provided during that thread's creation for its - initial state. - - All required parameters must be populated in order to send to server. - - :ivar role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: "user" and "assistant". - :vartype role: str or ~azure.ai.project.models.MessageRole - :ivar content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :vartype content: str - :ivar attachments: A list of files attached to the message, and the tools they should be added - to. - :vartype attachments: list[~azure.ai.project.models.MessageAttachment] - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. - :vartype metadata: dict[str, str] - """ - - role: Union[str, "_models.MessageRole"] = rest_field() - """The role of the entity that is creating the message. Allowed values include: - - - * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases - to represent user-generated messages. - * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: \"user\" and \"assistant\".""" - content: str = rest_field() - """The textual content of the initial message. Currently, robust input including images and - annotated text may only be provided via - a separate call to the create message API. Required.""" - attachments: Optional[List["_models.MessageAttachment"]] = rest_field() - """A list of files attached to the message, and the tools they should be added to.""" - metadata: Optional[Dict[str, str]] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. 
Keys may be up to 64 - characters in length and values may be up to 512 characters in length.""" - - @overload - def __init__( - self, - *, - role: Union[str, "_models.MessageRole"], - content: str, - attachments: Optional[List["_models.MessageAttachment"]] = None, - metadata: Optional[Dict[str, str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ThreadRun(_model_base.Model): - """Data representing a single evaluation run of an agent thread. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always 'thread.run'. Required. Default value is - "thread.run". - :vartype object: str - :ivar thread_id: The ID of the thread associated with this run. Required. - :vartype thread_id: str - :ivar assistant_id: The ID of the agent associated with the thread this run was performed - against. Required. - :vartype assistant_id: str - :ivar status: The status of the agent thread run. Required. Known values are: "queued", - "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", and - "expired". - :vartype status: str or ~azure.ai.project.models.RunStatus - :ivar required_action: The details of the action required for the agent thread run to continue. - :vartype required_action: ~azure.ai.project.models.RequiredAction - :ivar last_error: The last error, if any, encountered by this agent thread run. Required. - :vartype last_error: ~azure.ai.project.models.RunError - :ivar model: The ID of the model to use. Required. - :vartype model: str - :ivar instructions: The overridden system instructions used for this agent thread run. - Required. - :vartype instructions: str - :ivar tools: The overridden enabled tools used for this agent thread run. Required. - :vartype tools: list[~azure.ai.project.models.ToolDefinition] - :ivar created_at: The Unix timestamp, in seconds, representing when this object was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar expires_at: The Unix timestamp, in seconds, representing when this item expires. - Required. - :vartype expires_at: ~datetime.datetime - :ivar started_at: The Unix timestamp, in seconds, representing when this item was started. - Required. - :vartype started_at: ~datetime.datetime - :ivar completed_at: The Unix timestamp, in seconds, representing when this completed. Required. - :vartype completed_at: ~datetime.datetime - :ivar cancelled_at: The Unix timestamp, in seconds, representing when this was cancelled. - Required. - :vartype cancelled_at: ~datetime.datetime - :ivar failed_at: The Unix timestamp, in seconds, representing when this failed. Required. - :vartype failed_at: ~datetime.datetime - :ivar incomplete_details: Details on why the run is incomplete. Will be ``null`` if the run is - not incomplete. Required. Known values are: "max_completion_tokens" and "max_prompt_tokens". - :vartype incomplete_details: str or ~azure.ai.project.models.IncompleteRunDetails - :ivar usage: Usage statistics related to the run. This value will be ``null`` if the run is not - in a terminal state (i.e. ``in_progress``\\ , ``queued``\\ , etc.). Required. 
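# Illustrative sketch (editorial example, not part of the generated code; the
# status strings are the RunStatus values listed above): a polling loop can
# stop once the run reaches a terminal state.
_TERMINAL_RUN_STATUSES = {"cancelled", "failed", "completed", "expired"}

def _is_run_finished(run: "ThreadRun") -> bool:
    return run.status in _TERMINAL_RUN_STATUSES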
- :vartype usage: ~azure.ai.project.models.RunCompletionUsage 
- :ivar temperature: The sampling temperature used for this run. If not set, defaults to 1. 
- :vartype temperature: float 
- :ivar top_p: The nucleus sampling value used for this run. If not set, defaults to 1. 
- :vartype top_p: float 
- :ivar max_prompt_tokens: The maximum number of prompt tokens specified to have been used over 
- the course of the run. Required. 
- :vartype max_prompt_tokens: int 
- :ivar max_completion_tokens: The maximum number of completion tokens specified to have been 
- used over the course of the run. Required. 
- :vartype max_completion_tokens: int 
- :ivar truncation_strategy: The strategy to use for dropping messages as the context window 
- moves forward. Required. 
- :vartype truncation_strategy: ~azure.ai.project.models.TruncationObject 
- :ivar tool_choice: Controls whether or not and which tool is called by the model. Required. Is 
- one of the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], 
- AgentsNamedToolChoice 
- :vartype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or 
- ~azure.ai.project.models.AgentsNamedToolChoice 
- :ivar response_format: The response format of the tool calls used in this run. Required. Is one 
- of the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], 
- AgentsApiResponseFormat 
- :vartype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode or 
- ~azure.ai.project.models.AgentsApiResponseFormat 
- :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for 
- storing additional information about that object in a structured format. Keys may be up to 64 
- characters in length and values may be up to 512 characters in length. Required. 
- :vartype metadata: dict[str, str] 
- :ivar tool_resources: Override the tools the agent can use for this run. This is useful for 
- modifying the behavior on a per-run basis. 
- :vartype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions 
- :ivar parallel_tool_calls: Determines if tools can be executed in parallel within the run. 
- :vartype parallel_tool_calls: bool 
- """ 
- 
- id: str = rest_field() 
- """The identifier, which can be referenced in API endpoints. Required.""" 
- object: Literal["thread.run"] = rest_field() 
- """The object type, which is always 'thread.run'. Required. Default value is \"thread.run\".""" 
- thread_id: str = rest_field() 
- """The ID of the thread associated with this run. Required.""" 
- assistant_id: str = rest_field() 
- """The ID of the agent associated with the thread this run was performed against. Required.""" 
- status: Union[str, "_models.RunStatus"] = rest_field() 
- """The status of the agent thread run. Required. Known values are: \"queued\", \"in_progress\", 
- \"requires_action\", \"cancelling\", \"cancelled\", \"failed\", \"completed\", and \"expired\".""" 
- required_action: Optional["_models.RequiredAction"] = rest_field() 
- """The details of the action required for the agent thread run to continue.""" 
- last_error: "_models.RunError" = rest_field() 
- """The last error, if any, encountered by this agent thread run. Required.""" 
- model: str = rest_field() 
- """The ID of the model to use. Required.""" 
- instructions: str = rest_field() 
- """The overridden system instructions used for this agent thread run. Required.""" 
- tools: List["_models.ToolDefinition"] = rest_field() 
- """The overridden enabled tools used for this agent thread run.
Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this object was created. Required.""" - expires_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this item expires. Required.""" - started_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this item was started. Required.""" - completed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this completed. Required.""" - cancelled_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this was cancelled. Required.""" - failed_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp, in seconds, representing when this failed. Required.""" - incomplete_details: Union[str, "_models.IncompleteRunDetails"] = rest_field() - """Details on why the run is incomplete. Will be ``null`` if the run is not incomplete. Required. - Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\".""" - usage: "_models.RunCompletionUsage" = rest_field() - """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" - temperature: Optional[float] = rest_field() - """The sampling temperature used for this run. If not set, defaults to 1.""" - top_p: Optional[float] = rest_field() - """The nucleus sampling value used for this run. If not set, defaults to 1.""" - max_prompt_tokens: int = rest_field() - """The maximum number of prompt tokens specified to have been used over the course of the run. - Required.""" - max_completion_tokens: int = rest_field() - """The maximum number of completion tokens specified to have been used over the course of the run. - Required.""" - truncation_strategy: "_models.TruncationObject" = rest_field() - """The strategy to use for dropping messages as the context windows moves forward. Required.""" - tool_choice: "_types.AgentsApiToolChoiceOption" = rest_field() - """Controls whether or not and which tool is called by the model. Required. Is one of the - following types: str, Union[str, \"_models.AgentsApiToolChoiceOptionMode\"], - AgentsNamedToolChoice""" - response_format: "_types.AgentsApiResponseFormatOption" = rest_field() - """The response format of the tool calls used in this run. Required. Is one of the following - types: str, Union[str, \"_models.AgentsApiResponseFormatMode\"], AgentsApiResponseFormat""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - tool_resources: Optional["_models.UpdateToolResourcesOptions"] = rest_field() - """Override the tools the agent can use for this run. 
This is useful for modifying the behavior on - a per-run basis.""" - parallel_tool_calls: Optional[bool] = rest_field(name="parallelToolCalls") - """Determines if tools can be executed in parallel within the run.""" - - @overload - def __init__( # pylint: disable=too-many-locals - self, - *, - id: str, # pylint: disable=redefined-builtin - thread_id: str, - assistant_id: str, - status: Union[str, "_models.RunStatus"], - last_error: "_models.RunError", - model: str, - instructions: str, - tools: List["_models.ToolDefinition"], - created_at: datetime.datetime, - expires_at: datetime.datetime, - started_at: datetime.datetime, - completed_at: datetime.datetime, - cancelled_at: datetime.datetime, - failed_at: datetime.datetime, - incomplete_details: Union[str, "_models.IncompleteRunDetails"], - usage: "_models.RunCompletionUsage", - max_prompt_tokens: int, - max_completion_tokens: int, - truncation_strategy: "_models.TruncationObject", - tool_choice: "_types.AgentsApiToolChoiceOption", - response_format: "_types.AgentsApiResponseFormatOption", - metadata: Dict[str, str], - required_action: Optional["_models.RequiredAction"] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - tool_resources: Optional["_models.UpdateToolResourcesOptions"] = None, - parallel_tool_calls: Optional[bool] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["thread.run"] = "thread.run" - - -class ToolOutput(_model_base.Model): - """The data provided during a tool outputs submission to resolve pending tool calls and allow the - model to continue. - - :ivar tool_call_id: The ID of the tool call being resolved, as provided in the tool calls of a - required action from a run. - :vartype tool_call_id: str - :ivar output: The output from the tool to be submitted. - :vartype output: str - """ - - tool_call_id: Optional[str] = rest_field() - """The ID of the tool call being resolved, as provided in the tool calls of a required action from - a run.""" - output: Optional[str] = rest_field() - """The output from the tool to be submitted.""" - - @overload - def __init__( - self, - *, - tool_call_id: Optional[str] = None, - output: Optional[str] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ToolResources(_model_base.Model): - """A set of resources that are used by the agent's tools. The resources are specific to the type - of - tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` - tool requires a list of vector store IDs. - - :ivar code_interpreter: Resources to be used by the ``code_interpreter tool`` consisting of - file IDs. - :vartype code_interpreter: ~azure.ai.project.models.CodeInterpreterToolResource - :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store - IDs. - :vartype file_search: ~azure.ai.project.models.FileSearchToolResource - :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of - connection IDs. 
- :vartype bing_grounding: ~azure.ai.project.models.ConnectionListResource - :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of - connection IDs. - :vartype microsoft_fabric: ~azure.ai.project.models.ConnectionListResource - :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection - IDs. - :vartype share_point: ~azure.ai.project.models.ConnectionListResource - :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index - IDs and names. - :vartype azure_ai_search: ~azure.ai.project.models.AzureAISearchResource - """ - - code_interpreter: Optional["_models.CodeInterpreterToolResource"] = rest_field() - """Resources to be used by the ``code_interpreter tool`` consisting of file IDs.""" - file_search: Optional["_models.FileSearchToolResource"] = rest_field() - """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" - bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() - """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs.""" - microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() - """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs.""" - share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") - """Resources to be used by the ``sharepoint`` tool consisting of connection IDs.""" - azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() - """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" - - @overload - def __init__( - self, - *, - code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, - file_search: Optional["_models.FileSearchToolResource"] = None, - bing_grounding: Optional["_models.ConnectionListResource"] = None, - microsoft_fabric: Optional["_models.ConnectionListResource"] = None, - share_point: Optional["_models.ConnectionListResource"] = None, - azure_ai_search: Optional["_models.AzureAISearchResource"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class TruncationObject(_model_base.Model): - """Controls for how a thread will be truncated prior to the run. Use this to control the initial - context window of the run. - - - :ivar type: The truncation strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``\\ , the thread will - be truncated to the ``lastMessages`` count most recent messages in the thread. When set to - ``auto``\\ , messages in the middle of the thread - will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known - values are: "auto" and "last_messages". - :vartype type: str or ~azure.ai.project.models.TruncationStrategy - :ivar last_messages: The number of most recent messages from the thread when constructing the - context for the run. - :vartype last_messages: int - """ - - type: Union[str, "_models.TruncationStrategy"] = rest_field() - """The truncation strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``\ , the thread will - be truncated to the ``lastMessages`` count most recent messages in the thread. 
When set to - ``auto``\ , messages in the middle of the thread - will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known - values are: \"auto\" and \"last_messages\".""" - last_messages: Optional[int] = rest_field() - """The number of most recent messages from the thread when constructing the context for the run.""" - - @overload - def __init__( - self, - *, - type: Union[str, "_models.TruncationStrategy"], - last_messages: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class UpdateCodeInterpreterToolResourceOptions(_model_base.Model): - """Request object to update ``code_interpreter`` tool resources. - - :ivar file_ids: A list of file IDs to override the current list of the agent. - :vartype file_ids: list[str] - """ - - file_ids: Optional[List[str]] = rest_field() - """A list of file IDs to override the current list of the agent.""" - - @overload - def __init__( - self, - *, - file_ids: Optional[List[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class UpdateFileSearchToolResourceOptions(_model_base.Model): - """Request object to update ``file_search`` tool resources. - - :ivar vector_store_ids: A list of vector store IDs to override the current list of the agent. - :vartype vector_store_ids: list[str] - """ - - vector_store_ids: Optional[List[str]] = rest_field() - """A list of vector store IDs to override the current list of the agent.""" - - @overload - def __init__( - self, - *, - vector_store_ids: Optional[List[str]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class UpdateToolResourcesOptions(_model_base.Model): - """Request object. A set of resources that are used by the agent's tools. The resources are - specific to the type of tool. - For example, the ``code_interpreter`` tool requires a list of file IDs, while the - ``file_search`` tool requires a list of - vector store IDs. - - :ivar code_interpreter: Overrides the list of file IDs made available to the - ``code_interpreter`` tool. There can be a maximum of 20 files - associated with the tool. - :vartype code_interpreter: ~azure.ai.project.models.UpdateCodeInterpreterToolResourceOptions - :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of - 1 vector store attached to the agent. - :vartype file_search: ~azure.ai.project.models.UpdateFileSearchToolResourceOptions - :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding`` - tool consisting of connection IDs. - :vartype bing_grounding: ~azure.ai.project.models.ConnectionListResource - :ivar microsoft_fabric: Overrides the list of connections to be used by the - ``microsoft_fabric`` tool consisting of connection IDs.
- :vartype microsoft_fabric: ~azure.ai.project.models.ConnectionListResource - :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool - consisting of connection IDs. - :vartype share_point: ~azure.ai.project.models.ConnectionListResource - :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool - consisting of index IDs and names. - :vartype azure_ai_search: ~azure.ai.project.models.AzureAISearchResource - """ - - code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = rest_field() - """Overrides the list of file IDs made available to the ``code_interpreter`` tool. There can be a - maximum of 20 files - associated with the tool.""" - file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() - """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store - attached to the agent.""" - bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() - """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of - connection IDs.""" - microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() - """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of - connection IDs.""" - share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") - """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of - connection IDs.""" - azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() - """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and - names.""" - - @overload - def __init__( - self, - *, - code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, - file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, - bing_grounding: Optional["_models.ConnectionListResource"] = None, - microsoft_fabric: Optional["_models.ConnectionListResource"] = None, - share_point: Optional["_models.ConnectionListResource"] = None, - azure_ai_search: Optional["_models.AzureAISearchResource"] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStore(_model_base.Model): - """A vector store is a collection of processed files that can be used by the ``file_search`` tool. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``vector_store``. Required. Default value is - "vector_store". - :vartype object: str - :ivar created_at: The Unix timestamp (in seconds) for when the vector store was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar name: The name of the vector store. Required. - :vartype name: str - :ivar usage_bytes: The total number of bytes used by the files in the vector store. Required. - :vartype usage_bytes: int - :ivar file_counts: Files count grouped by status processed or being processed by this vector - store. Required.
- :vartype file_counts: ~azure.ai.project.models.VectorStoreFileCount - :ivar status: The status of the vector store, which can be either ``expired``\\ , - ``in_progress``\\ , or ``completed``. A status of ``completed`` indicates that the vector store - is ready for use. Required. Known values are: "expired", "in_progress", and "completed". - :vartype status: str or ~azure.ai.project.models.VectorStoreStatus - :ivar expires_after: Details on when this vector store expires. - :vartype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :ivar expires_at: The Unix timestamp (in seconds) for when the vector store will expire. - :vartype expires_at: ~datetime.datetime - :ivar last_active_at: The Unix timestamp (in seconds) for when the vector store was last - active. Required. - :vartype last_active_at: ~datetime.datetime - :ivar metadata: A set of up to 16 key/value pairs that can be attached to an object, used for - storing additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required. - :vartype metadata: dict[str, str] - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store"] = rest_field() - """The object type, which is always ``vector_store``. Required. Default value is \"vector_store\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store was created. Required.""" - name: str = rest_field() - """The name of the vector store. Required.""" - usage_bytes: int = rest_field() - """The total number of bytes used by the files in the vector store. Required.""" - file_counts: "_models.VectorStoreFileCount" = rest_field() - """Files count grouped by status processed or being processed by this vector store. Required.""" - status: Union[str, "_models.VectorStoreStatus"] = rest_field() - """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or - ``completed``. A status of ``completed`` indicates that the vector store is ready for use. - Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" - expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() - """Details on when this vector store expires.""" - expires_at: Optional[datetime.datetime] = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store will expire.""" - last_active_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store was last active. Required.""" - metadata: Dict[str, str] = rest_field() - """A set of up to 16 key/value pairs that can be attached to an object, used for storing - additional information about that object in a structured format. Keys may be up to 64 - characters in length and values may be up to 512 characters in length. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - name: str, - usage_bytes: int, - file_counts: "_models.VectorStoreFileCount", - status: Union[str, "_models.VectorStoreStatus"], - last_active_at: datetime.datetime, - metadata: Dict[str, str], - expires_after: Optional["_models.VectorStoreExpirationPolicy"] = None, - expires_at: Optional[datetime.datetime] = None, - ) -> None: ... 
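For orientation, here is a minimal sketch (not part of this patch) of the two construction styles the paired overloads above support: keyword arguments for known fields, or the raw service JSON passed through as a mapping. Every ID and value below is an illustrative assumption.

    import datetime
    from azure.ai.project import models as _models

    now = datetime.datetime.now(datetime.timezone.utc)
    counts = _models.VectorStoreFileCount(
        in_progress=0, completed=1, failed=0, cancelled=0, total=1
    )

    # Keyword-argument style: field names are checked against the model.
    store = _models.VectorStore(
        id="vs_123",  # illustrative ID
        created_at=now,
        name="my-store",
        usage_bytes=0,
        file_counts=counts,
        status="completed",
        last_active_at=now,
        metadata={},
    )

    # Mapping style: hand the service payload to the model as-is.
    same_store = _models.VectorStore(
        {"id": "vs_123", "object": "vector_store", "name": "my-store"}
    )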
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store"] = "vector_store" - - -class VectorStoreChunkingStrategyRequest(_model_base.Model): - """An abstract representation of a vector store chunking strategy configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - VectorStoreAutoChunkingStrategyRequest, VectorStoreStaticChunkingStrategyRequest - - All required parameters must be populated in order to send to server. - - :ivar type: The object type. Required. Known values are: "auto" and "static". - :vartype type: str or ~azure.ai.project.models.VectorStoreChunkingStrategyRequestType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Known values are: \"auto\" and \"static\".""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreAutoChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="auto"): - """The default strategy. This strategy currently uses a max_chunk_size_tokens of 800 and - chunk_overlap_tokens of 400. - - All required parameters must be populated in order to send to server. - - :ivar type: The object type, which is always 'auto'. Required. - :vartype type: str or ~azure.ai.project.models.AUTO - """ - - type: Literal[VectorStoreChunkingStrategyRequestType.AUTO] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'auto'. Required.""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.AUTO, **kwargs) - - -class VectorStoreChunkingStrategyResponse(_model_base.Model): - """An abstract representation of a vector store chunking strategy configuration. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - VectorStoreAutoChunkingStrategyResponse, VectorStoreStaticChunkingStrategyResponse - - - :ivar type: The object type. Required. Known values are: "other" and "static". - :vartype type: str or ~azure.ai.project.models.VectorStoreChunkingStrategyResponseType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - type: str = rest_discriminator(name="type") - """The object type. Required. Known values are: \"other\" and \"static\".""" - - @overload - def __init__( - self, - *, - type: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreAutoChunkingStrategyResponse(VectorStoreChunkingStrategyResponse, discriminator="other"): - """This is returned when the chunking strategy is unknown. Typically, this is because the file was - indexed before the chunking_strategy concept was introduced in the API. - - - :ivar type: The object type, which is always 'other'. Required. - :vartype type: str or ~azure.ai.project.models.OTHER - """ - - type: Literal[VectorStoreChunkingStrategyResponseType.OTHER] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'other'. Required.""" - - @overload - def __init__( - self, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs) - - -class VectorStoreDeletionStatus(_model_base.Model): - """Response object for deleting a vector store. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'vector_store.deleted'. Required. Default value - is "vector_store.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["vector_store.deleted"] = rest_field() - """The object type, which is always 'vector_store.deleted'. Required. Default value is - \"vector_store.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.deleted"] = "vector_store.deleted" - - -class VectorStoreExpirationPolicy(_model_base.Model): - """The expiration policy for a vector store. - - - :ivar anchor: Anchor timestamp after which the expiration policy applies. Supported anchors: - ``last_active_at``. Required. "last_active_at" - :vartype anchor: str or ~azure.ai.project.models.VectorStoreExpirationPolicyAnchor - :ivar days: The number of days after the anchor time that the vector store will expire. Required. - :vartype days: int - """ - - anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"] = rest_field() - """Anchor timestamp after which the expiration policy applies. Supported anchors: - ``last_active_at``. Required. \"last_active_at\"""" - days: int = rest_field() - """The number of days after the anchor time that the vector store will expire. Required.""" - - @overload - def __init__( - self, - *, - anchor: Union[str, "_models.VectorStoreExpirationPolicyAnchor"], - days: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model.
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreFile(_model_base.Model): - """Description of a file attached to a vector store. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``vector_store.file``. Required. Default value - is "vector_store.file". - :vartype object: str - :ivar usage_bytes: The total vector store usage in bytes. Note that this may be different from - the original file - size. Required. - :vartype usage_bytes: int - :ivar created_at: The Unix timestamp (in seconds) for when the vector store file was created. - Required. - :vartype created_at: ~datetime.datetime - :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. - :vartype vector_store_id: str - :ivar status: The status of the vector store file, which can be either ``in_progress``\\ , - ``completed``\\ , ``cancelled``\\ , or ``failed``. The status ``completed`` indicates that the - vector store file is ready for use. Required. Known values are: "in_progress", "completed", - "failed", and "cancelled". - :vartype status: str or ~azure.ai.project.models.VectorStoreFileStatus - :ivar last_error: The last error associated with this vector store file. Will be ``null`` if - there are no errors. Required. - :vartype last_error: ~azure.ai.project.models.VectorStoreFileError - :ivar chunking_strategy: The strategy used to chunk the file. Required. - :vartype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyResponse - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.file"] = rest_field() - """The object type, which is always ``vector_store.file``. Required. Default value is - \"vector_store.file\".""" - usage_bytes: int = rest_field() - """The total vector store usage in bytes. Note that this may be different from the original file - size. Required.""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store file was created. Required.""" - vector_store_id: str = rest_field() - """The ID of the vector store that the file is attached to. Required.""" - status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , - ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file - is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and - \"cancelled\".""" - last_error: "_models.VectorStoreFileError" = rest_field() - """The last error associated with this vector store file. Will be ``null`` if there are no errors. - Required.""" - chunking_strategy: "_models.VectorStoreChunkingStrategyResponse" = rest_field() - """The strategy used to chunk the file. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - usage_bytes: int, - created_at: datetime.datetime, - vector_store_id: str, - status: Union[str, "_models.VectorStoreFileStatus"], - last_error: "_models.VectorStoreFileError", - chunking_strategy: "_models.VectorStoreChunkingStrategyResponse", - ) -> None: ... 
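The ``chunking_strategy`` field above is typed as the abstract ``VectorStoreChunkingStrategyResponse``; the ``rest_discriminator`` machinery selects the concrete subclass from the wire ``type`` value via ``__mapping__``. A minimal sketch of that dispatch, assuming ``_model_base`` deserializes discriminated fields on access and using made-up payload values:

    from azure.ai.project import models as _models

    raw_file = {
        "id": "file_abc",  # illustrative
        "object": "vector_store.file",
        "usage_bytes": 1234,
        "created_at": 1700000000,
        "vector_store_id": "vs_123",
        "status": "completed",
        "chunking_strategy": {
            "type": "static",
            "static": {"max_chunk_size_tokens": 800, "chunk_overlap_tokens": 400},
        },
    }

    vs_file = _models.VectorStoreFile(raw_file)
    # "type": "static" should route to the static response model; an unknown
    # strategy comes back as "other" (VectorStoreAutoChunkingStrategyResponse).
    print(type(vs_file.chunking_strategy).__name__)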
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.file"] = "vector_store.file" - - -class VectorStoreFileBatch(_model_base.Model): - """A batch of files attached to a vector store. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The identifier, which can be referenced in API endpoints. Required. - :vartype id: str - :ivar object: The object type, which is always ``vector_store.file_batch``. Required. Default - value is "vector_store.files_batch". - :vartype object: str - :ivar created_at: The Unix timestamp (in seconds) for when the vector store files batch was - created. Required. - :vartype created_at: ~datetime.datetime - :ivar vector_store_id: The ID of the vector store that the file is attached to. Required. - :vartype vector_store_id: str - :ivar status: The status of the vector store files batch, which can be either ``in_progress``\\ - , ``completed``\\ , ``cancelled`` or ``failed``. Required. Known values are: "in_progress", - "completed", "cancelled", and "failed". - :vartype status: str or ~azure.ai.project.models.VectorStoreFileBatchStatus - :ivar file_counts: Files count grouped by status processed or being processed by this vector - store. Required. - :vartype file_counts: ~azure.ai.project.models.VectorStoreFileCount - """ - - id: str = rest_field() - """The identifier, which can be referenced in API endpoints. Required.""" - object: Literal["vector_store.files_batch"] = rest_field() - """The object type, which is always ``vector_store.file_batch``. Required. Default value is - \"vector_store.files_batch\".""" - created_at: datetime.datetime = rest_field(format="unix-timestamp") - """The Unix timestamp (in seconds) for when the vector store files batch was created. Required.""" - vector_store_id: str = rest_field() - """The ID of the vector store that the file is attached to. Required.""" - status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``\ , - ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", - \"completed\", \"cancelled\", and \"failed\".""" - file_counts: "_models.VectorStoreFileCount" = rest_field() - """Files count grouped by status processed or being processed by this vector store. Required.""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - created_at: datetime.datetime, - vector_store_id: str, - status: Union[str, "_models.VectorStoreFileBatchStatus"], - file_counts: "_models.VectorStoreFileCount", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.files_batch"] = "vector_store.files_batch" - - -class VectorStoreFileCount(_model_base.Model): - """Counts of files processed or being processed by this vector store grouped by status. - - - :ivar in_progress: The number of files that are currently being processed. Required. 
- :vartype in_progress: int - :ivar completed: The number of files that have been successfully processed. Required. - :vartype completed: int - :ivar failed: The number of files that have failed to process. Required. - :vartype failed: int - :ivar cancelled: The number of files that were cancelled. Required. - :vartype cancelled: int - :ivar total: The total number of files. Required. - :vartype total: int - """ - - in_progress: int = rest_field() - """The number of files that are currently being processed. Required.""" - completed: int = rest_field() - """The number of files that have been successfully processed. Required.""" - failed: int = rest_field() - """The number of files that have failed to process. Required.""" - cancelled: int = rest_field() - """The number of files that were cancelled. Required.""" - total: int = rest_field() - """The total number of files. Required.""" - - @overload - def __init__( - self, - *, - in_progress: int, - completed: int, - failed: int, - cancelled: int, - total: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreFileDeletionStatus(_model_base.Model): - """Response object for deleting a vector store file relationship. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar id: The ID of the resource specified for deletion. Required. - :vartype id: str - :ivar deleted: A value indicating whether deletion was successful. Required. - :vartype deleted: bool - :ivar object: The object type, which is always 'vector_store.file.deleted'. Required. Default value - is "vector_store.file.deleted". - :vartype object: str - """ - - id: str = rest_field() - """The ID of the resource specified for deletion. Required.""" - deleted: bool = rest_field() - """A value indicating whether deletion was successful. Required.""" - object: Literal["vector_store.file.deleted"] = rest_field() - """The object type, which is always 'vector_store.file.deleted'. Required. Default value is - \"vector_store.file.deleted\".""" - - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - deleted: bool, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.object: Literal["vector_store.file.deleted"] = "vector_store.file.deleted" - - -class VectorStoreFileError(_model_base.Model): - """Details on the error that may have occurred while processing a file for this vector store. - - - :ivar code: The error code. Required. Known values are: - "internal_error", "file_not_found", "parsing_error", and "unhandled_mime_type". - :vartype code: str or ~azure.ai.project.models.VectorStoreFileErrorCode - :ivar message: A human-readable description of the error. Required. - :vartype message: str - """ - - code: Union[str, "_models.VectorStoreFileErrorCode"] = rest_field() - """The error code. Required.
Known values are: - \"internal_error\", \"file_not_found\", \"parsing_error\", and \"unhandled_mime_type\".""" - message: str = rest_field() - """A human-readable description of the error. Required.""" - - @overload - def __init__( - self, - *, - code: Union[str, "_models.VectorStoreFileErrorCode"], - message: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): - """Options to configure a vector store static chunking strategy. - - - :ivar max_chunk_size_tokens: The maximum number of tokens in each chunk. The default value is - 800. The minimum value is 100 and the maximum value is 4096. Required. - :vartype max_chunk_size_tokens: int - :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value - is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. Required. - :vartype chunk_overlap_tokens: int - """ - - max_chunk_size_tokens: int = rest_field() - """The maximum number of tokens in each chunk. The default value is 800. The minimum value is 100 - and the maximum value is 4096. Required.""" - chunk_overlap_tokens: int = rest_field() - """The number of tokens that overlap between chunks. The default value is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. Required.""" - - @overload - def __init__( - self, - *, - max_chunk_size_tokens: int, - chunk_overlap_tokens: int, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class VectorStoreStaticChunkingStrategyRequest(VectorStoreChunkingStrategyRequest, discriminator="static"): - """A statically configured chunking strategy. - - All required parameters must be populated in order to send to server. - - :ivar type: The object type, which is always 'static'. Required. - :vartype type: str or ~azure.ai.project.models.STATIC - :ivar static: The options for the static chunking strategy. Required. - :vartype static: ~azure.ai.project.models.VectorStoreStaticChunkingStrategyOptions - """ - - type: Literal[VectorStoreChunkingStrategyRequestType.STATIC] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() - """The options for the static chunking strategy. Required.""" - - @overload - def __init__( - self, - *, - static: "_models.VectorStoreStaticChunkingStrategyOptions", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=VectorStoreChunkingStrategyRequestType.STATIC, **kwargs) - - -class VectorStoreStaticChunkingStrategyResponse( - VectorStoreChunkingStrategyResponse, discriminator="static" -): # pylint: disable=name-too-long - """A statically configured chunking strategy. - - - :ivar type: The object type, which is always 'static'. Required.
- :vartype type: str or ~azure.ai.project.models.STATIC - :ivar static: The options for the static chunking strategy. Required. - :vartype static: ~azure.ai.project.models.VectorStoreStaticChunkingStrategyOptions - """ - - type: Literal[VectorStoreChunkingStrategyResponseType.STATIC] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'static'. Required.""" - static: "_models.VectorStoreStaticChunkingStrategyOptions" = rest_field() - """The options for the static chunking strategy. Required.""" - - @overload - def __init__( - self, - *, - static: "_models.VectorStoreStaticChunkingStrategyOptions", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs) diff --git a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py deleted file mode 100644 index 8bcafd3cf937..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/models/_patch.py +++ /dev/null @@ -1,997 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -import datetime -import inspect -import json -import logging -import base64 -import asyncio - -from azure.core.credentials import TokenCredential, AccessToken - -from ._enums import AgentStreamEvent, ConnectionType -from ._models import ( - ConnectionsListSecretsResponse, - MessageDeltaChunk, - SubmitToolOutputsAction, - ThreadRun, - RunStep, - ThreadMessage, - RunStepDeltaChunk, - FunctionToolDefinition, - FunctionDefinition, - ToolDefinition, - ToolResources, - FileSearchToolDefinition, - FileSearchToolResource, - CodeInterpreterToolDefinition, - CodeInterpreterToolResource, - RequiredFunctionToolCall, -) - -from abc import ABC, abstractmethod -from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, get_origin - -logger = logging.getLogger(__name__) - - -def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: - """ - Remove the parameters not present in the class's public fields; return a shallow copy of the dictionary. - - **Note:** Classes inherited from the model check that the parameters are present - in the list of attributes and, if they are not, an error is raised. This check may not - be relevant for classes not inherited from azure.ai.project._model_base.Model. - :param model_class: The class of model to be used. - :param parameters: The parsed dictionary with parameters. - :return: The dictionary with all invalid parameters removed. - """ - new_params = {} - valid_parameters = set( - filter( - lambda x: not x.startswith("_") and hasattr(model_class.__dict__[x], "_type"), model_class.__dict__.keys() - ) - ) - for k in filter(lambda x: x in valid_parameters, parameters.keys()): - new_params[k] = parameters[k] - return new_params - - -def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: - """ - Instantiate the class with the set of parameters from the server. - - :param model_class: The class of model to be used.
- :param parameters: The parsed dictionary with parameters. - :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. - """ - if not isinstance(parameters, dict): - return parameters - return model_class(**_filter_parameters(model_class, parameters)) - - -class ConnectionProperties: - """The properties of a single connection. - - :ivar id: A unique identifier for the connection. - :vartype id: str - :ivar name: The friendly name of the connection. - :vartype name: str - :ivar authentication_type: The authentication type used by the connection. - :vartype authentication_type: ~azure.ai.project.models._models.AuthenticationType - :ivar connection_type: The connection type. - :vartype connection_type: ~azure.ai.project.models._models.ConnectionType - :ivar endpoint_url: The endpoint URL associated with this connection. - :vartype endpoint_url: str - :ivar key: The api-key to be used when accessing the connection. - :vartype key: str - :ivar token_credential: The TokenCredential to be used when accessing the connection. - :vartype token_credential: ~azure.core.credentials.TokenCredential - """ - - def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: Optional[TokenCredential] = None) -> None: - self.id = connection.id - self.name = connection.name - self.authentication_type = connection.properties.auth_type - self.connection_type = connection.properties.category - self.endpoint_url = ( - connection.properties.target[:-1] - if connection.properties.target.endswith("/") - else connection.properties.target - ) - self.key: Optional[str] = None - if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): - self.key = connection.properties.credentials.key - self.token_credential = token_credential - - def to_evaluator_model_config(self, deployment_name, api_version) -> Dict[str, str]: - connection_type = self.connection_type.value - if self.connection_type.value == ConnectionType.AZURE_OPEN_AI: - connection_type = "azure_openai" - - if self.authentication_type == "ApiKey": - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": connection_type, - "api_version": api_version, - "api_key": f"{self.id}/credentials/key", - } - else: - model_config = { - "azure_deployment": deployment_name, - "azure_endpoint": self.endpoint_url, - "type": connection_type, - "api_version": api_version, - } - return model_config - - def __str__(self): - out = "{\n" - out += f' "name": "{self.name}",\n' - out += f' "id": "{self.id}",\n' - out += f' "authentication_type": "{self.authentication_type}",\n' - out += f' "connection_type": "{self.connection_type}",\n' - out += f' "endpoint_url": "{self.endpoint_url}",\n' - if self.key: - out += f' "key": "{self.key}",\n' - else: - out += ' "key": null,\n' - if self.token_credential: - access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") - out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' - else: - out += ' "token_credential": null\n' - out += "}\n" - return out - - -class SASTokenCredential(TokenCredential): - def __init__( - self, - *, - sas_token: str, - credential: TokenCredential, - subscription_id: str, - resource_group_name: str, - project_name: str, - connection_name: str, - ): - self._sas_token = sas_token - self._credential = credential -
self._subscription_id = subscription_id - self._resource_group_name = resource_group_name - self._project_name = project_name - self._connection_name = connection_name - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) - - @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: - payload = jwt_token.split(".")[1] - padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary - decoded_bytes = base64.urlsafe_b64decode(padded_payload) - decoded_str = decoded_bytes.decode("utf-8") - decoded_payload = json.loads(decoded_str) - expiration_date = decoded_payload.get("exp") - return datetime.datetime.fromtimestamp(expiration_date, datetime.timezone.utc) - - def _refresh_token(self) -> None: - logger.debug("[SASTokenCredential._refresh_token] Enter") - from azure.ai.project import AIProjectClient - - project_client = AIProjectClient( - credential=self._credential, - endpoint="not-needed", # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. - subscription_id=self._subscription_id, - resource_group_name=self._resource_group_name, - project_name=self._project_name, - ) - - connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True) - - self._sas_token = connection.properties.credentials.sas - self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) - logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) - - def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken: - logger.debug("[SASTokenCredential.get_token] Enter") - if self._expires_on < datetime.datetime.now(datetime.timezone.utc): - self._refresh_token() - return AccessToken(self._sas_token, self._expires_on.timestamp()) - - -# Define type_map to translate Python type annotations to JSON Schema types -type_map = { - "str": "string", - "int": "integer", - "float": "number", - "bool": "boolean", - "bytes": "string", # Typically encoded as base64-encoded strings in JSON - "NoneType": "null", - "datetime": "string", # Use format "date-time" - "date": "string", # Use format "date" - "UUID": "string", # Use format "uuid" -} - - -def _map_type(annotation) -> str: - - if annotation == inspect.Parameter.empty: - return "string" # Default type if annotation is missing - - origin = get_origin(annotation) - - if origin in {list, List}: - return "array" - elif origin in {dict, Dict}: - return "object" - elif hasattr(annotation, "__name__"): - return type_map.get(annotation.__name__, "string") - elif isinstance(annotation, type): - return type_map.get(annotation.__name__, "string") - - return "string" # Fallback to "string" if type is unrecognized - - -class Tool(ABC): - """ - An abstract class representing a tool that can be used by an agent. - """ - - @property - @abstractmethod - def definitions(self) -> List[ToolDefinition]: - """Get the tool definitions.""" - pass - - @property - @abstractmethod - def resources(self) -> ToolResources: - """Get the tool resources.""" - pass - - @abstractmethod - def execute(self, tool_call: Any) -> Any: - """ - Execute the tool with the provided tool call. - - :param tool_call: The tool call to execute. - :return: The output of the tool operations.
- """ - pass - - -class FunctionTool(Tool): - """ - A tool that executes user-defined functions. - """ - - def __init__(self, functions: Dict[str, Any]): - """ - Initialize FunctionTool with a dictionary of functions. - - :param functions: A dictionary where keys are function names and values are the function objects. - """ - self._functions = functions - self._definitions = self._build_function_definitions(functions) - - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: - specs = [] - for name, func in functions.items(): - sig = inspect.signature(func) - params = sig.parameters - docstring = inspect.getdoc(func) - description = docstring.split("\n")[0] if docstring else "No description" - - properties = {} - for param_name, param in params.items(): - param_type = _map_type(param.annotation) - param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None - properties[param_name] = {"type": param_type, "description": param_description} - - function_def = FunctionDefinition( - name=name, - description=description, - parameters={"type": "object", "properties": properties, "required": list(params.keys())}, - ) - tool_def = FunctionToolDefinition(function=function_def) - specs.append(tool_def) - return specs - - def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]: - function_name = tool_call.function.name - arguments = tool_call.function.arguments - - if function_name not in self._functions: - logging.error(f"Function '{function_name}' not found.") - raise ValueError(f"Function '{function_name}' not found.") - - function = self._functions[function_name] - - try: - parsed_arguments = json.loads(arguments) - except json.JSONDecodeError as e: - logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") - raise ValueError(f"Invalid JSON arguments: {e}") from e - - if not isinstance(parsed_arguments, dict): - logging.error(f"Arguments must be a JSON object for function '{function_name}'.") - raise TypeError("Arguments must be a JSON object.") - - return function, parsed_arguments - - def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - function, parsed_arguments = self._get_func_and_args(tool_call) - - try: - return function(**parsed_arguments) if parsed_arguments else function() - except TypeError as e: - logging.error(f"Error executing function '{tool_call.function.name}': {e}") - raise - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the function definitions. - - :return: A list of function definitions. - """ - return self._definitions - - @property - def resources(self) -> ToolResources: - """ - Get the tool resources for the agent. - - :return: An empty ToolResources as FunctionTool doesn't have specific resources. - """ - return ToolResources() - - -class AsyncFunctionTool(FunctionTool): - - async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: - function, parsed_arguments = self._get_func_and_args(tool_call) - - try: - if inspect.iscoroutinefunction(function): - return await function(**parsed_arguments) if parsed_arguments else await function() - else: - return function(**parsed_arguments) if parsed_arguments else function() - except TypeError as e: - logging.error(f"Error executing function '{tool_call.function.name}': {e}") - raise - - -class FileSearchTool(Tool): - """ - A tool that searches for uploaded file information from the created vector stores. 
- """ - - def __init__(self, vector_store_ids: List[str] = []): - self.vector_store_ids = vector_store_ids - - def add_vector_store(self, store_id: str): - """ - Add a vector store ID to the list of vector stores to search for files. - """ - self.vector_store_ids.append(store_id) - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the file search tool definitions. - """ - return [FileSearchToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the file search resources. - """ - return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids)) - - def execute(self, tool_call: Any) -> Any: - pass - - -class CodeInterpreterTool(Tool): - """ - A tool that interprets code files uploaded to the agent. - """ - - def __init__(self): - self.file_ids = [] - - def add_file(self, file_id: str): - """ - Add a file ID to the list of files to interpret. - - :param file_id: The ID of the file to interpret. - """ - self.file_ids.append(file_id) - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the code interpreter tool definitions. - """ - return [CodeInterpreterToolDefinition()] - - @property - def resources(self) -> ToolResources: - """ - Get the code interpreter resources. - """ - return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids)) - - def execute(self, tool_call: Any) -> Any: - pass - - -class ToolSet: - """ - A collection of tools that can be used by an agent. - """ - - def __init__(self): - self._tools: List[Tool] = [] - - def validate_tool_type(self, tool_type: Type[Tool]) -> None: - """ - Validate the type of the tool. - - :param tool_type: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool_type, AsyncFunctionTool): - raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.project.aio." - ) - - def add(self, tool: Tool): - """ - Add a tool to the tool set. - - :param tool: The tool to add. - :raises ValueError: If a tool of the same type already exists. - """ - self.validate_tool_type(type(tool)) - - if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): - raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") - self._tools.append(tool) - - def remove(self, tool_type: Type[Tool]) -> None: - """ - Remove a tool of the specified type from the tool set. - - :param tool_type: The type of tool to remove. - :raises ValueError: If a tool of the specified type is not found. - """ - for i, tool in enumerate(self._tools): - if isinstance(tool, tool_type): - del self._tools[i] - logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.") - return - raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") - - @property - def definitions(self) -> List[ToolDefinition]: - """ - Get the definitions for all tools in the tool set. - """ - tools = [] - for tool in self._tools: - tools.extend(tool.definitions) - return tools - - @property - def resources(self) -> ToolResources: - """ - Get the resources for all tools in the tool set. 
- """ - tool_resources = {} - for tool in self._tools: - resources = tool.resources - for key, value in resources.items(): - if key in tool_resources: - if isinstance(tool_resources[key], dict) and isinstance(value, dict): - tool_resources[key].update(value) - else: - tool_resources[key] = value - return self._create_tool_resources_from_dict(tool_resources) - - def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: - """ - Safely converts a dictionary into a ToolResources instance. - """ - try: - return ToolResources(**resources) - except TypeError as e: - logging.error(f"Error creating ToolResources: {e}") - raise ValueError("Invalid resources for ToolResources.") from e - - def get_definitions_and_resources(self) -> Dict[str, Any]: - """ - Get the definitions and resources for all tools in the tool set. - - :return: A dictionary containing the tool resources and definitions. - """ - return { - "tool_resources": self.resources, - "tools": self.definitions, - } - - def get_tool(self, tool_type: Type[Tool]) -> Tool: - """ - Get a tool of the specified type from the tool set. - - :param tool_type: The type of tool to get. - :return: The tool of the specified type. - :raises ValueError: If a tool of the specified type is not found. - """ - for tool in self._tools: - if isinstance(tool, tool_type): - return tool - raise ValueError(f"Tool of type {tool_type.__name__} not found.") - - def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. - """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(FunctionTool) - output = tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") - - return tool_outputs - - -class AsyncToolSet(ToolSet): - - def validate_tool_type(self, tool_type: Type[Tool]) -> None: - """ - Validate the type of the tool. - - :param tool_type: The type of the tool to validate. - :raises ValueError: If the tool type is not a subclass of Tool. - """ - if isinstance(tool_type, FunctionTool): - raise ValueError( - "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." - ) - - async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: - """ - Execute a tool of the specified type with the provided tool calls. - - :param tool_calls: A list of tool calls to execute. - :return: The output of the tool operations. 
- """ - tool_outputs = [] - - for tool_call in tool_calls: - try: - if tool_call.type == "function": - tool = self.get_tool(AsyncFunctionTool) - output = await tool.execute(tool_call) - tool_output = { - "tool_call_id": tool_call.id, - "output": output, - } - tool_outputs.append(tool_output) - except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") - - return tool_outputs - - -class AgentEventHandler: - - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" - pass - - def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" - pass - - def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" - pass - - def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" - pass - - def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" - pass - - def on_error(self, data: str) -> None: - """Handle error events.""" - pass - - def on_done(self) -> None: - """Handle the completion of the stream.""" - pass - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" - pass - - -class AsyncAgentEventHandler: - - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" - pass - - async def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" - pass - - async def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" - pass - - async def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" - pass - - async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" - pass - - async def on_error(self, data: str) -> None: - """Handle error events.""" - pass - - async def on_done(self) -> None: - """Handle the completion of the stream.""" - pass - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" - pass - - -class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): - def __init__( - self, - response_iterator: AsyncIterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, Optional[AsyncAgentEventHandler]], Awaitable[None]], - event_handler: Optional["AsyncAgentEventHandler"] = None, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.done = False - self.buffer = "" - self.submit_tool_outputs = submit_tool_outputs - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - result = close_method() - if asyncio.iscoroutine(result): - await result - - def __aiter__(self): - return self - - async def __anext__(self) -> Tuple[str, Any]: - while True: - try: - chunk = await self.response_iterator.__anext__() - self.buffer += chunk.decode("utf-8") - except StopAsyncIteration: - if self.buffer: - event_data_str, self.buffer = self.buffer, "" - if event_data_str: - return await self._process_event(event_data_str) - raise StopAsyncIteration - - while "\n\n" in self.buffer: - event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return await self._process_event(event_data_str) - - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: - event_lines = event_data_str.strip().split("\n") - event_type 
= None - event_data = "" - - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data = json.loads(event_data) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, - }: - event_data_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, - }: - event_data_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, - }: - event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_data_obj = parsed_data - - return event_type, event_data_obj - - async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = self._parse_event_data(event_data_str) - - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - await self.submit_tool_outputs(event_data_obj, self.event_handler) - if self.event_handler: - try: - if isinstance(event_data_obj, MessageDeltaChunk): - await self.event_handler.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - await self.event_handler.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - await self.event_handler.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - await self.event_handler.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - await self.event_handler.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - await self.event_handler.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - await self.event_handler.on_done() - self.done = True # Mark the stream as done - else: - await self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") - - return event_type, event_data_obj - - async def until_done(self) -> 
None: - """ - Iterates through all events until the stream is marked as done. - """ - try: - async for _ in self: - pass # The EventHandler handles the events - except StopAsyncIteration: - pass - - -class AgentRunStream(Iterator[Tuple[str, Any]]): - def __init__( - self, - response_iterator: Iterator[bytes], - submit_tool_outputs: Callable[[ThreadRun, Optional[AgentEventHandler]], None], - event_handler: Optional[AgentEventHandler] = None, - ): - self.response_iterator = response_iterator - self.event_handler = event_handler - self.done = False - self.buffer = "" - self.submit_tool_outputs = submit_tool_outputs - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - close_method = getattr(self.response_iterator, "close", None) - if callable(close_method): - close_method() - - def __iter__(self): - return self - - def __next__(self) -> Tuple[str, Any]: - if self.done: - raise StopIteration - while True: - try: - chunk = next(self.response_iterator) - self.buffer += chunk.decode("utf-8") - except StopIteration: - if self.buffer: - event_data_str, self.buffer = self.buffer, "" - if event_data_str: - return self._process_event(event_data_str) - raise StopIteration - - while "\n\n" in self.buffer: - event_data_str, self.buffer = self.buffer.split("\n\n", 1) - return self._process_event(event_data_str) - - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: - event_lines = event_data_str.strip().split("\n") - event_type = None - event_data = "" - - for line in event_lines: - if line.startswith("event:"): - event_type = line.split(":", 1)[1].strip() - elif line.startswith("data:"): - event_data = line.split(":", 1)[1].strip() - - if not event_type: - raise ValueError("Event type not specified in the event data.") - - try: - parsed_data = json.loads(event_data) - except json.JSONDecodeError: - parsed_data = event_data - - # Workaround for service bug: Rename 'expires_at' to 'expired_at' - if event_type.startswith("thread.run.step") and isinstance(parsed_data, dict) and "expires_at" in parsed_data: - parsed_data["expired_at"] = parsed_data.pop("expires_at") - - # Map to the appropriate class instance - if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, - }: - event_data_obj = _safe_instantiate(ThreadRun, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, - }: - event_data_obj = _safe_instantiate(RunStep, parsed_data) - elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, - }: - event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: - event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: - event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) - else: - event_data_obj 
= parsed_data - - return event_type, event_data_obj - - def _process_event(self, event_data_str: str) -> Tuple[str, Any]: - event_type, event_data_obj = self._parse_event_data(event_data_str) - - if ( - isinstance(event_data_obj, ThreadRun) - and event_data_obj.status == "requires_action" - and isinstance(event_data_obj.required_action, SubmitToolOutputsAction) - ): - self.submit_tool_outputs(event_data_obj, self.event_handler) - if self.event_handler: - try: - if isinstance(event_data_obj, MessageDeltaChunk): - self.event_handler.on_message_delta(event_data_obj) - elif isinstance(event_data_obj, ThreadMessage): - self.event_handler.on_thread_message(event_data_obj) - elif isinstance(event_data_obj, ThreadRun): - self.event_handler.on_thread_run(event_data_obj) - elif isinstance(event_data_obj, RunStep): - self.event_handler.on_run_step(event_data_obj) - elif isinstance(event_data_obj, RunStepDeltaChunk): - self.event_handler.on_run_step_delta(event_data_obj) - elif event_type == AgentStreamEvent.ERROR: - self.event_handler.on_error(event_data_obj) - elif event_type == AgentStreamEvent.DONE: - self.event_handler.on_done() - self.done = True # Mark the stream as done - else: - self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") - - return event_type, event_data_obj - - def until_done(self) -> None: - """ - Iterates through all events until the stream is marked as done. - """ - try: - for _ in self: - pass # The EventHandler handles the events - except StopIteration: - pass - - -__all__: List[str] = [ - "AgentEventHandler", - "AgentRunStream", - "AsyncAgentEventHandler", - "AsyncAgentRunStream", - "AsyncFunctionTool", - "AsyncToolSet", - "CodeInterpreterTool", - "FileSearchTool", - "FunctionTool", - "SASTokenCredential", - "Tool", - "ToolSet", -] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py deleted file mode 100644 index 35cf92df96bc..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/operations/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
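To make the streaming classes above concrete, here is a sketch that drives AgentRunStream from a canned sequence of SSE-style byte frames instead of a live HTTP response; the frame payloads, the PrintingHandler subclass, and the no-op submit_tool_outputs callback are illustrative assumptions, not documented SDK usage:

```python
from azure.ai.project.models import AgentEventHandler, AgentRunStream  # path assumed


class PrintingHandler(AgentEventHandler):
    """Illustrative handler overriding just two of the callbacks."""

    def on_message_delta(self, delta) -> None:
        print("delta:", delta)

    def on_done(self) -> None:
        print("stream finished")


# Two frames in the "event:"/"data:" wire format that _parse_event_data expects;
# the JSON payload shape is illustrative only.
frames = iter(
    [
        b'event: thread.message.delta\ndata: {"id": "msg_1", "delta": {}}\n\n',
        b"event: done\ndata: [DONE]\n\n",
    ]
)

with AgentRunStream(frames, lambda run, handler: None, PrintingHandler()) as stream:
    stream.until_done()  # dispatches each parsed event to the handler until DONE
```

Against a live service, a ThreadRun event with status requires_action triggers the supplied submit_tool_outputs callback (a no-op in this sketch) before the event reaches the handler.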
-# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._operations import AgentsOperations # type: ignore -from ._operations import ConnectionsOperations # type: ignore -from ._operations import EvaluationsOperations # type: ignore - -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AgentsOperations", - "ConnectionsOperations", - "EvaluationsOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py deleted file mode 100644 index a427d96f11f2..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/operations/_operations.py +++ /dev/null @@ -1,7396 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) Python Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import json -import sys -from typing import Any, Callable, Dict, IO, Iterable, List, Optional, TYPE_CHECKING, TypeVar, Union, overload -import urllib.parse - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - StreamClosedError, - StreamConsumedError, - map_error, -) -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import _model_base, models as _models -from .._model_base import SdkJSONEncoder, _deserialize -from .._serialization import Serializer -from .._vendor import FileType, prepare_multipart_form_data - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore - -if TYPE_CHECKING: - from .. 
import _types -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_agents_create_agent_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_agents_request( - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: 
Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_agent_request(assistant_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/assistants/{assistantId}" - path_format_arguments = { - "assistantId": _SERIALIZER.url("assistant_id", assistant_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_thread_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def 
build_agents_update_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_thread_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_message_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_messages_request( - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if run_id is not None: - _params["runId"] = _SERIALIZER.query("run_id", run_id, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages/{messageId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "messageId": _SERIALIZER.url("message_id", message_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_message_request(thread_id: str, message_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/messages/{messageId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "messageId": _SERIALIZER.url("message_id", message_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_run_request(thread_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - 
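The list builders above share one cursor-pagination surface (limit, order, after, before). A short sketch with hypothetical IDs showing how the optional parameters reach the query string:

```python
request = build_agents_list_messages_request(
    "thread_abc",        # hypothetical thread ID
    limit=20,            # page size
    order="desc",        # or models.ListSortOrder.DESCENDING
    after="msg_cursor",  # cursor: last message ID of the previous page
)
# Each value is validated and serialized into the query string next to
# api-version; parameters left as None (run_id, before) are omitted entirely.
print(request.method)  # "GET"
print(request.url)     # /threads/thread_abc/messages?api-version=...&limit=20&order=desc&after=msg_cursor
```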
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_runs_request( - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_update_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - 
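Each builder returns an azure.core.rest.HttpRequest with a service-relative URL; it is the client pipeline that resolves the endpoint and applies policies. A sketch assuming an already-constructed client exposing the standard send_request method, with hypothetical IDs:

```python
request = build_agents_get_run_request(thread_id="thread_abc", run_id="run_xyz")

# `client` is assumed to be an instance of this package's client; send_request
# pushes the request through the pipeline, which prepends the base endpoint and
# applies authentication, retry, and tracing policies.
response = client.send_request(request)
response.raise_for_status()
run = response.json()
print(run["id"], run["status"])
```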
accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_submit_tool_outputs_to_run_request( # pylint: disable=name-too-long - thread_id: str, run_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/submit_tool_outputs" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_cancel_run_request(thread_id: str, run_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/cancel" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_thread_and_run_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/runs" - - # Construct parameters - _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_run_step_request(thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/steps/{stepId}" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - "stepId": _SERIALIZER.url("step_id", step_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_run_steps_request( - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/threads/{threadId}/runs/{runId}/steps" - path_format_arguments = { - "threadId": _SERIALIZER.url("thread_id", thread_id, "str"), - "runId": _SERIALIZER.url("run_id", run_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_files_request( - *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if purpose is not None: - _params["purpose"] = _SERIALIZER.query("purpose", purpose, "str") - - # 
Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_upload_file_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_file_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_file_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_file_content_request(file_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/files/{fileId}/content" - path_format_arguments = { - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_stores_request( - *, - limit: Optional[int] = 
None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_modify_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - 
"vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_vector_store_request(vector_store_id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_store_files_request( # pylint: disable=name-too-long - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if filter is not None: - _params["filter"] = _SERIALIZER.query("filter", filter, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, file_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files/{fileId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_delete_vector_store_file_request( # pylint: disable=name-too-long - vector_store_id: str, file_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/files/{fileId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "fileId": _SERIALIZER.url("file_id", file_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_create_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", 
vector_store_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_get_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, batch_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_cancel_vector_store_file_batch_request( # pylint: disable=name-too-long - vector_store_id: str, batch_id: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/cancel" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": _SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_agents_list_vector_store_file_batch_files_request( # pylint: disable=name-too-long - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/vector_stores/{vectorStoreId}/file_batches/{batchId}/files" - path_format_arguments = { - "vectorStoreId": _SERIALIZER.url("vector_store_id", vector_store_id, "str"), - "batchId": 
_SERIALIZER.url("batch_id", batch_id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if filter is not None: - _params["filter"] = _SERIALIZER.query("filter", filter, "str") - if limit is not None: - _params["limit"] = _SERIALIZER.query("limit", limit, "int") - if order is not None: - _params["order"] = _SERIALIZER.query("order", order, "str") - if after is not None: - _params["after"] = _SERIALIZER.query("after", after, "str") - if before is not None: - _params["before"] = _SERIALIZER.query("before", before, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_list_request( - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if category is not None: - _params["category"] = _SERIALIZER.query("category", category, "str") - if include_all is not None: - _params["includeAll"] = _SERIALIZER.query("include_all", include_all, "bool") - if target is not None: - _params["target"] = _SERIALIZER.query("target", target, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections/{connectionName}" - path_format_arguments = { - "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/connections/{connectionName}/listsecrets" - path_format_arguments = { - "connectionName": _SERIALIZER.url("connection_name", connection_name, "str"), - 
} - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_create_request(**kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs:run" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_list_request( - *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not None: - _params["skip"] = _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_update_request(id: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] =
kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/runs/{id}" - path_format_arguments = { - "id": _SERIALIZER.url("id", id, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_get_schedule_request(name: str, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{name}" - path_format_arguments = { - "name": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_create_or_replace_schedule_request( # pylint: disable=name-too-long - name: str, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{name}" - path_format_arguments = { - "name": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_list_schedule_request( - *, top: Optional[int] = None, skip: Optional[int] = None, maxpagesize: Optional[int] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules" - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if top is not None: - _params["top"] = _SERIALIZER.query("top", top, "int") - if skip is not 
None: - _params["skip"] = _SERIALIZER.query("skip", skip, "int") - if maxpagesize is not None: - _params["maxpagesize"] = _SERIALIZER.query("maxpagesize", maxpagesize, "int") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_evaluations_delete_schedule_request(name: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = "/evaluations/schedules/{name}" - path_format_arguments = { - "name": _SERIALIZER.url("name", name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -class AgentsOperations: # pylint: disable=too-many-public-methods - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.AIProjectClient`'s - :attr:`agents` attribute. - """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. 
- :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Creates a new agent. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword name: The name of the new agent. 
Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
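For orientation alongside the generated body that follows: a minimal sketch of reaching create_agent through the client's agents attribute, as the class docstring above prescribes. The endpoint, Azure resource names, and model deployment name are assumed placeholders, not values taken from this patch.

from azure.identity import DefaultAzureCredential
from azure.ai.project import AIProjectClient

# Construct the project client; AgentsOperations is exposed as its `agents` attribute.
project_client = AIProjectClient(
    endpoint="https://<region>.api.azureml.ms",  # assumed placeholder
    subscription_id="<subscription-id>",  # assumed placeholder
    resource_group_name="<resource-group>",  # assumed placeholder
    project_name="<project-name>",  # assumed placeholder
    credential=DefaultAzureCredential(),
)

# Keyword form of create_agent: `model` is the only required keyword; keywords
# left as None are filtered out of the JSON body before serialization.
agent = project_client.agents.create_agent(
    model="gpt-4o",  # assumed model deployment name
    name="my-agent",
    instructions="You are a helpful assistant.",
    temperature=0.7,
)
print(agent.id)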
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - if model is _Unset: - raise TypeError("missing required argument: model") - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_agent_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_agents( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfAgent: - """Gets a list of agents that were previously created. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. 
- :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfAgent. The OpenAIPageableListOfAgent is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfAgent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfAgent] = kwargs.pop("cls", None) - - _request = build_agents_list_agents_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfAgent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_agent(self, assistant_id: str, **kwargs: Any) -> _models.Agent: - """Retrieves an existing agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: Agent. 
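A sketch of cursor pagination with list_agents, continuing from the client constructed in the earlier sketch. The data, has_more, and last_id field names are assumptions based on the OpenAI-style pageable list model this operation returns.

# Walk the agent list in pages of 20, newest first, following the `after` cursor.
page = project_client.agents.list_agents(limit=20, order="desc")
while True:
    for agent in page.data:
        print(agent.id, agent.name)
    if not page.has_more:
        break
    page = project_client.agents.list_agents(limit=20, order="desc", after=page.last_id)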
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - _request = build_agents_get_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_agent( - self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, - assistant_id: str, - *, - content_type: str = "application/json", - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_agent( - self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_agent( - self, - assistant_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: Optional[str] = None, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.Agent: - """Modifies an existing agent. - - :param assistant_id: The ID of the agent to modify. Required. - :type assistant_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword model: The ID of the model to use. Default value is None. - :paramtype model: str - :keyword name: The modified name for the agent to use. Default value is None. - :paramtype name: str - :keyword description: The modified description for the agent to use. Default value is None. - :paramtype description: str - :keyword instructions: The modified system instructions for the new agent to use. Default value - is None. - :paramtype instructions: str - :keyword tools: The modified collection of tools to enable for the agent. Default value is - None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, - the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool - requires a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
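Continuing the sketch: update_agent sends only the keywords the caller passes, because the assembled body dict is filtered for None values before serialization, as the implementation below shows.

# Patch selected fields of an existing agent; omitted fields are left unchanged.
agent = project_client.agents.update_agent(
    assistant_id=agent.id,
    instructions="Answer in one short paragraph.",
    metadata={"stage": "test"},
)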
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Agent] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "description": description, - "instructions": instructions, - "metadata": metadata, - "model": model, - "name": name, - "response_format": response_format, - "temperature": temperature, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_agent_request( - assistant_id=assistant_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Agent, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_agent(self, assistant_id: str, **kwargs: Any) -> _models.AgentDeletionStatus: - """Deletes an agent. - - :param assistant_id: Identifier of the agent. Required. - :type assistant_id: str - :return: AgentDeletionStatus. 
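The matching cleanup call, under the same assumed client; the deleted flag is an assumed field on the AgentDeletionStatus model named in the docstring above.

status = project_client.agents.delete_agent(agent.id)
print(status.deleted)  # assumed boolean field on AgentDeletionStatus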
The AgentDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_agent_request( - assistant_id=assistant_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_thread( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread( - self, - *, - content_type: str = "application/json", - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. 
For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_thread( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - messages: Optional[List[_models.ThreadMessageOptions]] = None, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Creates a new thread. Threads contain messages and can be run by agents. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword messages: The initial messages to associate with the new thread. Default value is - None. - :paramtype messages: list[~azure.ai.project.models.ThreadMessageOptions] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
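A sketch of create_thread under the same assumed client. Because the generated models are MutableMapping-compatible, the initial message is passed here as a plain dict rather than a ThreadMessageOptions instance.

# Create a thread seeded with one user message and a metadata tag.
thread = project_client.agents.create_thread(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    metadata={"session": "demo"},
)
print(thread.id)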
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"messages": messages, "metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_thread(self, thread_id: str, **kwargs: Any) -> _models.AgentThread: - """Gets information about an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: AgentThread. 
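These operations share the error_map pattern visible in their bodies: 401, 404, 409, and 304 are mapped to specific azure.core exceptions, and any other non-200 status raises HttpResponseError. A sketch of catching that at the call site, with an assumed thread ID:

from azure.core.exceptions import HttpResponseError, ResourceNotFoundError

try:
    thread = project_client.agents.get_thread("<thread-id>")  # assumed placeholder ID
except ResourceNotFoundError:
    print("no thread with that identifier")  # the 404 mapping above
except HttpResponseError as err:
    print("request failed with status", err.status_code)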
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - _request = build_agents_get_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_thread( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_thread( - self, - thread_id: str, - *, - content_type: str = "application/json", - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. 
- :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_thread( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentThread. The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_thread( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_resources: Optional[_models.ToolResources] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.AgentThread: - """Modifies an existing thread. - - :param thread_id: The ID of the thread to modify. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_resources: A set of resources that are made available to the agent's tools in - this thread. The resources are specific to the - type of tool. For example, the ``code_interpreter`` tool requires a list of file IDs, while - the ``file_search`` tool requires - a list of vector store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: AgentThread. 
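update_thread follows the same body-assembly rule as update_agent: only non-None keywords reach the wire.

# Tag an existing thread; tool_resources is omitted, so it is not sent at all.
thread = project_client.agents.update_thread(thread.id, metadata={"reviewed": "true"})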
The AgentThread is compatible with MutableMapping - :rtype: ~azure.ai.project.models.AgentThread - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AgentThread] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata, "tool_resources": tool_resources} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_thread_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.AgentThread, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_thread(self, thread_id: str, **kwargs: Any) -> _models.ThreadDeletionStatus: - """Deletes an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :return: ThreadDeletionStatus. 
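And the matching thread cleanup, again assuming the deleted field on the deletion-status model:

status = project_client.agents.delete_thread(thread.id)
print(status.deleted)  # assumed boolean field on ThreadDeletionStatus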
The ThreadDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_thread_request( - thread_id=thread_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_message( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_message( - self, - thread_id: str, - *, - role: Union[str, _models.MessageRole], - content: str, - content_type: str = "application/json", - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. 
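``delete_thread`` maps 401/404/409/304 to typed exceptions via the error map above, so a hedged sketch with error handling looks like this (same assumed ``project_client``; the ``deleted`` field name is an assumption, since the ``ThreadDeletionStatus`` model body is not shown in this hunk):

    from azure.core.exceptions import HttpResponseError, ResourceNotFoundError

    try:
        status = project_client.agents.delete_thread("thread_abc123")
        print(status["deleted"])  # field name assumed; model is MutableMapping-compatible
    except ResourceNotFoundError:
        print("thread did not exist")
    except HttpResponseError as err:
        print(f"delete failed with status {err.status_code}")
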
Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.project.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. - :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_message( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_message( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - role: Union[str, _models.MessageRole] = _Unset, - content: str = _Unset, - attachments: Optional[List[_models.MessageAttachment]] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Creates a new message on a specified thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword role: The role of the entity that is creating the message. Allowed values include: - - - * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. - * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. - :paramtype role: str or ~azure.ai.project.models.MessageRole - :keyword content: The textual content of the initial message. Currently, robust input including - images and annotated text may only be provided via - a separate call to the create message API. Required. - :paramtype content: str - :keyword attachments: A list of files attached to the message, and the tools they should be - added to. Default value is None. 
- :paramtype attachments: list[~azure.ai.project.models.MessageAttachment] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - if role is _Unset: - raise TypeError("missing required argument: role") - if content is _Unset: - raise TypeError("missing required argument: content") - body = {"attachments": attachments, "content": content, "metadata": metadata, "role": role} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_message_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_messages( - self, - thread_id: str, - *, - run_id: Optional[str] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadMessage: - """Gets a list of messages that exist on a 
thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword run_id: Filter messages by the run ID that generated them. Default value is None. - :paramtype run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadMessage. The OpenAIPageableListOfThreadMessage is compatible - with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_list_messages_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: 
ignore - - @distributed_trace - def get_message(self, thread_id: str, message_id: str, **kwargs: Any) -> _models.ThreadMessage: - """Gets an existing message from an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - _request = build_agents_get_message_request( - thread_id=thread_id, - message_id=message_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_message( - self, thread_id: str, message_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_message( - self, - thread_id: str, - message_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. 
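Before moving on to ``update_message``, the three message operations above compose naturally; a sketch under the same assumptions as earlier (placeholder IDs, assumed ``agents`` group):

    # Create a user message; role takes the documented known values "user"/"assistant"
    # (or the MessageRole enum from azure.ai.project.models).
    message = project_client.agents.create_message(
        "thread_abc123",
        role="user",
        content="How do I reset my password?",
    )

    # Newest first, one page of up to 20 messages, optionally filtered by run ID.
    page = project_client.agents.list_messages("thread_abc123", order="desc", limit=20)

    # Round-trip a single message by its ID.
    fetched = project_client.agents.get_message("thread_abc123", message_id=message["id"])
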
- :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_message( - self, thread_id: str, message_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadMessage. The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_message( - self, - thread_id: str, - message_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadMessage: - """Modifies an existing message on an existing thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param message_id: Identifier of the message. Required. - :type message_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadMessage. 
The ThreadMessage is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadMessage - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadMessage] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_message_request( - thread_id=thread_id, - message_id=message_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadMessage, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
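Per the overloads above, ``metadata`` is the only mutable field in the keyword form of ``update_message``; a sketch, same assumptions as earlier:

    updated = project_client.agents.update_message(
        "thread_abc123",
        "msg_abc123",                   # placeholder message ID
        metadata={"reviewed": "true"},  # keys up to 64 chars, values up to 512 chars
    )
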
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether a tool is called by the model and, if so, which tool. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "additional_instructions": additional_instructions, - "additional_messages": additional_messages, - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_run_request( - thread_id=thread_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_runs( - self, - thread_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfThreadRun: - """Gets a list of runs for a specified thread. - - :param thread_id: Identifier of the thread. Required. 
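The ``create_run`` implementation above builds the JSON body from keywords when no explicit body is passed, and raises ``TypeError`` if ``assistant_id`` is then missing. A keyword-form sketch (placeholder IDs; the ``status`` field name is assumed from the run life cycle):

    run = project_client.agents.create_run(
        "thread_abc123",
        assistant_id="asst_abc123",   # required when no JSON/IO body is supplied
        additional_instructions="Keep answers under three sentences.",
        temperature=0.2,              # lower values give more deterministic output
        max_prompt_tokens=4000,
        metadata={"origin": "sample"},
    )
    print(run["id"], run["status"])
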
- :type thread_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfThreadRun. The OpenAIPageableListOfThreadRun is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_list_runs_request( - thread_id=thread_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Gets an existing run from an existing thread. - - :param thread_id: Identifier of the thread. Required. 
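``list_runs`` uses the same cursor pagination as ``list_messages``; a sketch (the ``data`` and ``has_more`` fields are assumed from the OpenAI-style pageable shape, which is not spelled out in this hunk):

    first = project_client.agents.list_runs("thread_abc123", limit=20, order="asc")
    if first["has_more"]:
        # Pass the last object ID of the previous page as the 'after' cursor.
        nxt = project_client.agents.list_runs(
            "thread_abc123", limit=20, order="asc", after=first["data"][-1]["id"]
        )
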
- :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_get_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def update_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_run( - self, - thread_id: str, - run_id: str, - *, - content_type: str = "application/json", - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
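Since ``get_run`` is a plain read, a common pattern is to poll it until the run leaves its in-progress states; a sketch before the ``update_run`` overloads (the exact status strings follow the Assistants-style life cycle and are assumed here):

    import time

    run = project_client.agents.get_run("thread_abc123", "run_abc123")
    while run["status"] in ("queued", "in_progress"):
        time.sleep(1)  # simple fixed backoff for illustration
        run = project_client.agents.get_run("thread_abc123", run["id"])
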
- :paramtype content_type: str - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Modifies an existing thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"metadata": metadata} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_update_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
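As with ``update_message``, only ``metadata`` is writable through the keyword form of ``update_run``; a sketch under the same assumptions:

    run = project_client.agents.update_run(
        "thread_abc123",
        "run_abc123",
        metadata={"triaged": "yes"},
    )
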
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. - :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - tool_outputs: List[_models.ToolOutput] = _Unset, - stream_parameter: Optional[bool] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword tool_outputs: A list of tools for which the outputs are being submitted. Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword stream_parameter: If true, returns a stream of events that happen during the Run as - server-sent events, terminating when the run enters a terminal state. Default value is None. 
- :paramtype stream_parameter: bool - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if tool_outputs is _Unset: - raise TypeError("missing required argument: tool_outputs") - body = {"stream": stream_parameter, "tool_outputs": tool_outputs} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_submit_tool_outputs_to_run_request( - thread_id=thread_id, - run_id=run_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def cancel_run(self, thread_id: str, run_id: str, **kwargs: Any) -> _models.ThreadRun: - """Cancels a run of an in progress thread. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :return: ThreadRun. 
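When a run stops with status ``requires_action`` and a ``required_action.type`` of ``submit_tool_outputs``, the caller executes the requested tools locally and returns their results through this operation. A sketch (the ``ToolOutput`` constructor fields are assumed from the wire format; the model type itself is referenced in the docstrings above):

    from azure.ai.project.models import ToolOutput

    if run["status"] == "requires_action":
        run = project_client.agents.submit_tool_outputs_to_run(
            "thread_abc123",
            run["id"],
            tool_outputs=[
                ToolOutput(tool_call_id="call_abc123", output='{"temperature_c": 22}'),
            ],
        )
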
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - _request = build_agents_cancel_run_request( - thread_id=thread_id, - run_id=run_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_thread_and_run( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
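# Hedged sketch: cancelling an in-progress run. Cancellation is asynchronous on
# the service side, so the returned ThreadRun may still report 'cancelling'; the
# status values and the `get_run` sibling operation are assumptions based on the
# Agents API shape, not on this file.
import time

run = agents_client.cancel_run(thread_id=thread.id, run_id=run.id)
while run.status == "cancelling":
    time.sleep(1)
    run = agents_client.get_run(thread_id=thread.id, run_id=run.id)
print(f"final status: {run.status}")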
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, - *, - assistant_id: str, - content_type: str = "application/json", - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. 
If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_thread_and_run( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_thread_and_run( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - thread: Optional[_models.AgentThreadCreationOptions] = None, - model: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.UpdateToolResourcesOptions] = None, - stream_parameter: Optional[bool] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new agent thread and immediately starts a run using that new thread. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent for which the thread should be created. Required. - :paramtype assistant_id: str - :keyword thread: The details used to create the new thread. If no thread is provided, an empty - one will be created. Default value is None. - :paramtype thread: ~azure.ai.project.models.AgentThreadCreationOptions - :keyword model: The overridden model that the agent should use to run the thread. Default value - is None. - :paramtype model: str - :keyword instructions: The overridden system instructions the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword tools: The overridden list of enabled tools the agent should use to run the thread. - Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: Override the tools the agent can use for this run. This is useful for - modifying the behavior on a per-run basis. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.UpdateToolResourcesOptions - :keyword stream_parameter: If ``true``\\ , returns a stream of events that happen during the - Run as server-sent events, - terminating when the Run enters a terminal state with a ``data: [DONE]`` message. Default - value is None. - :paramtype stream_parameter: bool - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. 
See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort to use only - the number of completion tokens specified, across multiple turns of the run. If the run - exceeds the number of completion tokens - specified, the run will end with status ``incomplete``. See ``incomplete_details`` for more - info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice. Default value is None. - :paramtype tool_choice: str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat. Default value is None. - :paramtype response_format: str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun.
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.ThreadRun] = kwargs.pop("cls", None) - - if body is _Unset: - if assistant_id is _Unset: - raise TypeError("missing required argument: assistant_id") - body = { - "assistant_id": assistant_id, - "instructions": instructions, - "max_completion_tokens": max_completion_tokens, - "max_prompt_tokens": max_prompt_tokens, - "metadata": metadata, - "model": model, - "response_format": response_format, - "stream": stream_parameter, - "temperature": temperature, - "thread": thread, - "tool_choice": tool_choice, - "tool_resources": tool_resources, - "tools": tools, - "top_p": top_p, - "truncation_strategy": truncation_strategy, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_thread_and_run_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ThreadRun, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_run_step(self, thread_id: str, run_id: str, step_id: str, **kwargs: Any) -> _models.RunStep: - """Gets a single run step from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :param step_id: Identifier of the run step. Required. - :type step_id: str - :return: RunStep. 
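# Hedged sketch of the keyword overload of create_thread_and_run: create a
# thread seeded with one user message and start a run in a single round trip.
# The dict shape passed for `thread` mirrors AgentThreadCreationOptions from the
# docstrings; `agents_client` and `agent` are hypothetical, pre-existing objects.
run = agents_client.create_thread_and_run(
    assistant_id=agent.id,
    thread={"messages": [{"role": "user", "content": "Summarize the attached file."}]},
    temperature=0.2,
    max_completion_tokens=512,
)
print(run.id, run.thread_id, run.status)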
The RunStep is compatible with MutableMapping - :rtype: ~azure.ai.project.models.RunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.RunStep] = kwargs.pop("cls", None) - - _request = build_agents_get_run_step_request( - thread_id=thread_id, - run_id=run_id, - step_id=step_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.RunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_run_steps( - self, - thread_id: str, - run_id: str, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfRunStep: - """Gets a list of run steps from a thread run. - - :param thread_id: Identifier of the thread. Required. - :type thread_id: str - :param run_id: Identifier of the run. Required. - :type run_id: str - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfRunStep. The OpenAIPageableListOfRunStep is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfRunStep - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfRunStep] = kwargs.pop("cls", None) - - _request = build_agents_list_run_steps_request( - thread_id=thread_id, - run_id=run_id, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfRunStep, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_files( - self, *, purpose: Optional[Union[str, _models.FilePurpose]] = None, **kwargs: Any - ) -> _models.FileListResponse: - """Gets a list of previously uploaded files. - - :keyword purpose: The purpose of the file. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Default value is - None. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :return: FileListResponse. 
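# Hedged pagination sketch for list_run_steps using the `after` cursor
# documented above. The page attributes (data, has_more, last_id) follow the
# OpenAI pageable-list shape and are assumptions; only the parameters come from
# the operation defined above.
after = None
while True:
    page = agents_client.list_run_steps(
        thread_id=thread.id, run_id=run.id, limit=100, order="asc", after=after
    )
    for step in page.data:
        print(step.id, step.type, step.status)
    if not page.has_more:
        break
    after = page.last_id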
The FileListResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileListResponse] = kwargs.pop("cls", None) - - _request = build_agents_list_files_request( - purpose=purpose, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileListResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Required. - :type body: JSON - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def upload_file( - self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. 
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def upload_file( - self, - body: JSON = _Unset, - *, - file: FileType = _Unset, - purpose: Union[str, _models.FilePurpose] = _Unset, - filename: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIFile: - """Uploads a file for use by other operations. - - :param body: Is one of the following types: JSON Required. - :type body: JSON - :keyword file: The file data, in bytes. Required. - :paramtype file: ~azure.ai.project._vendor.FileType - :keyword purpose: The intended purpose of the uploaded file. Use ``assistants`` for Agents and - Message files, ``vision`` for Agents image file inputs, ``batch`` for Batch API, and - ``fine-tune`` for Fine-tuning. Known values are: "fine-tune", "fine-tune-results", - "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. - :paramtype purpose: str or ~azure.ai.project.models.FilePurpose - :keyword filename: The name of the file. Default value is None. - :paramtype filename: str - :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file is _Unset: - raise TypeError("missing required argument: file") - if purpose is _Unset: - raise TypeError("missing required argument: purpose") - body = {"file": file, "filename": filename, "purpose": purpose} - body = {k: v for k, v in body.items() if v is not None} - _body = body.as_dict() if isinstance(body, _model_base.Model) else body - _file_fields: List[str] = ["file"] - _data_fields: List[str] = ["purpose", "filename"] - _files, _data = prepare_multipart_form_data(_body, _file_fields, _data_fields) - - _request = build_agents_upload_file_request( - api_version=self._config.api_version, - files=_files, - data=_data, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - 
deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_file(self, file_id: str, **kwargs: Any) -> _models.FileDeletionStatus: - """Delete a previously uploaded file. - - :param file_id: The ID of the file to delete. Required. - :type file_id: str - :return: FileDeletionStatus. The FileDeletionStatus is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: - """Returns information about a specific file. Does not retrieve file content. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: OpenAIFile. 
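# Hedged sketch: uploading a local file for Agents use with the keyword overload
# of upload_file, then cleaning it up with delete_file. Only parameters shown
# above are used; the attribute names on the returned models (id, deleted) are
# assumptions, and `agents_client` is a hypothetical client object.
with open("product_info.md", "rb") as f:
    uploaded = agents_client.upload_file(
        file=f.read(), purpose="assistants", filename="product_info.md"
    )
print("uploaded file id:", uploaded.id)

status = agents_client.delete_file(uploaded.id)
assert status.deleted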
The OpenAIFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIFile] = kwargs.pop("cls", None) - - _request = build_agents_get_file_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: - """Retrieves the raw content of a specific file. - - :param file_id: The ID of the file to retrieve. Required. - :type file_id: str - :return: FileContentResponse.
The FileContentResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models.FileContentResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) - - _request = build_agents_get_file_content_request( - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.FileContentResponse, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_vector_stores( - self, - *, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStore: - """Returns a list of vector stores. - - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStore. 
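# Hedged sketch: reading a file's metadata and then its raw content. The
# `content` field on FileContentResponse and the metadata attributes are
# assumptions; the two operations themselves are defined above.
info = agents_client.get_file(uploaded.id)
print(info.filename, info.purpose)

response = agents_client.get_file_content(uploaded.id)
with open("downloaded_" + info.filename, "wb") as out:
    out.write(response.content)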
The OpenAIPageableListOfVectorStore is compatible - with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStore] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_stores_request( - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store( - self, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store( - self, - *, - content_type: str = "application/json", - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. 
Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store( - self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: Optional[List[str]] = None, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Creates a vector store. - - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like - ``file_search`` that can access files. Default value is None. - :paramtype file_ids: list[str] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Only applicable if file_ids is non-empty. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. 
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = { - "chunking_strategy": chunking_strategy, - "expires_after": expires_after, - "file_ids": file_ids, - "metadata": metadata, - "name": name, - } - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStore: - """Returns the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStore. 
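# Hedged sketch of the keyword overload of create_vector_store: index previously
# uploaded files and let the store expire after seven days of inactivity. The
# dict passed for expires_after mirrors VectorStoreExpirationPolicy
# (anchor/days) and is an assumption; `agents_client` is hypothetical.
vector_store = agents_client.create_vector_store(
    file_ids=[uploaded.id],
    name="product-docs",
    expires_after={"anchor": "last_active_at", "days": 7},
)
print(vector_store.id, vector_store.status)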
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def modify_vector_store( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def modify_vector_store( - self, - vector_store_id: str, - *, - content_type: str = "application/json", - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None.
- :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def modify_vector_store( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStore. The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def modify_vector_store( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - name: Optional[str] = None, - expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any - ) -> _models.VectorStore: - """Modifies an existing vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or an IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword name: The name of the vector store. Default value is None. - :paramtype name: str - :keyword expires_after: Details on when this vector store expires. Default value is None. - :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: VectorStore.
The VectorStore is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStore - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStore] = kwargs.pop("cls", None) - - if body is _Unset: - body = {"expires_after": expires_after, "metadata": metadata, "name": name} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_modify_vector_store_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStore, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_vector_store(self, vector_store_id: str, **kwargs: Any) -> _models.VectorStoreDeletionStatus: - """Deletes the vector store object matching the specified ID. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :return: VectorStoreDeletionStatus. 
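# Hedged sketch: renaming a store and updating its metadata with the keyword
# overload of modify_vector_store. Per the docstrings above, only name,
# expires_after and metadata can be changed; file membership is managed through
# the vector-store file operations that follow. `agents_client` is hypothetical.
vector_store = agents_client.modify_vector_store(
    vector_store.id,
    name="product-docs-v2",
    metadata={"owner": "support-team", "revision": "2"},
)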
The VectorStoreDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_request( - vector_store_id=vector_store_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_vector_store_files( - self, - vector_store_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_files_request( - vector_store_id=vector_store_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store_file( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file( - self, - vector_store_id: str, - *, - file_id: str, - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_id: str = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFile: - """Create a vector store file by attaching a file to a vector store. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store_file(self, vector_store_id: str, file_id: str, **kwargs: Any) -> _models.VectorStoreFile: - """Retrieves a vector store file. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFile. 
The VectorStoreFile is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete_vector_store_file( - self, vector_store_id: str, file_id: str, **kwargs: Any - ) -> _models.VectorStoreFileDeletionStatus: - """Delete a vector store file. This will remove the file from the vector store but the file itself - will not be deleted. - To delete the file, use the delete file endpoint. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param file_id: Identifier of the file. Required. - :type file_id: str - :return: VectorStoreFileDeletionStatus. 
The VectorStoreFileDeletionStatus is compatible with - MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileDeletionStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileDeletionStatus] = kwargs.pop("cls", None) - - _request = build_agents_delete_vector_store_file_request( - vector_store_id=vector_store_id, - file_id=file_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileDeletionStatus, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create_vector_store_file_batch( - self, vector_store_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch( - self, - vector_store_id: str, - *, - file_ids: List[str], - content_type: str = "application/json", - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_vector_store_file_batch( - self, vector_store_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_vector_store_file_batch( - self, - vector_store_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - file_ids: List[str] = _Unset, - chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, - **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Create a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] - :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will - use the auto strategy. Default value is None. - :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_agents_create_vector_store_file_batch_request( - vector_store_id=vector_store_id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Retrieve a vector store file batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_get_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def cancel_vector_store_file_batch( - self, vector_store_id: str, batch_id: str, **kwargs: Any - ) -> _models.VectorStoreFileBatch: - """Cancel a vector store file batch. This attempts to cancel the processing of files in this batch - as soon as possible. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :return: VectorStoreFileBatch. 
The VectorStoreFileBatch is compatible with MutableMapping - :rtype: ~azure.ai.project.models.VectorStoreFileBatch - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) - - _request = build_agents_cancel_vector_store_file_batch_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.VectorStoreFileBatch, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_vector_store_file_batch_files( - self, - vector_store_id: str, - batch_id: str, - *, - filter: Optional[Union[str, _models.VectorStoreFileStatusFilter]] = None, - limit: Optional[int] = None, - order: Optional[Union[str, _models.ListSortOrder]] = None, - after: Optional[str] = None, - before: Optional[str] = None, - **kwargs: Any - ) -> _models.OpenAIPageableListOfVectorStoreFile: - """Returns a list of vector store files in a batch. - - :param vector_store_id: Identifier of the vector store. Required. - :type vector_store_id: str - :param batch_id: Identifier of the file batch. Required. - :type batch_id: str - :keyword filter: Filter by file status. Known values are: "in_progress", "completed", "failed", - and "cancelled". Default value is None. - :paramtype filter: str or ~azure.ai.project.models.VectorStoreFileStatusFilter - :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and - 100, and the default is 20. Default value is None. - :paramtype limit: int - :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order - and desc for descending order. Known values are: "asc" and "desc". Default value is None. - :paramtype order: str or ~azure.ai.project.models.ListSortOrder - :keyword after: A cursor for use in pagination. after is an object ID that defines your place - in the list. 
For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the - list. Default value is None. - :paramtype after: str - :keyword before: A cursor for use in pagination. before is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, ending with - obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of - the list. Default value is None. - :paramtype before: str - :return: OpenAIPageableListOfVectorStoreFile. The OpenAIPageableListOfVectorStoreFile is - compatible with MutableMapping - :rtype: ~azure.ai.project.models.OpenAIPageableListOfVectorStoreFile - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.OpenAIPageableListOfVectorStoreFile] = kwargs.pop("cls", None) - - _request = build_agents_list_vector_store_file_batch_files_request( - vector_store_id=vector_store_id, - batch_id=batch_id, - filter=filter, - limit=limit, - order=order, - after=after, - before=before, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.OpenAIPageableListOfVectorStoreFile, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class ConnectionsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.AIProjectClient`'s - :attr:`connections` attribute. 
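-
- A minimal usage sketch (``project_client`` is an assumed
- :class:`~azure.ai.project.AIProjectClient` instance; the category value is
- illustrative only):
-
- .. code-block:: python
-
-     # Sketch only: the operations generated on this class are private
-     # (underscore-prefixed); public convenience wrappers typically live in
-     # the hand-written _patch.py layer.
-     connections = project_client.connections
-     response = connections._list(category="AzureOpenAI", include_all=True)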
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def _list( - self, - *, - category: Optional[Union[str, _models.ConnectionType]] = None, - include_all: Optional[bool] = None, - target: Optional[str] = None, - **kwargs: Any - ) -> _models._models.ConnectionsListResponse: - """List the details of all the connections (not including their credentials). - - :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. - :paramtype category: str or ~azure.ai.project.models.ConnectionType - :keyword include_all: Indicates whether to list datastores. Service default: do not list - datastores. Default value is None. - :paramtype include_all: bool - :keyword target: Target of the workspace connection. Default value is None. - :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) - - _request = build_connections_list_request( - category=category, - include_all=include_all, - target=target, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def _get(self, connection_name: str, **kwargs: Any) -> _models._models.ConnectionsListSecretsResponse: - 
"""Get the details of a single connection, without credentials. - - :param connection_name: Connection Name. Required. - :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - _request = build_connections_get_request( - connection_name=connection_name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def _list_secrets( - self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - def _list_secrets( - self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - @overload - def _list_secrets( - self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... - - @distributed_trace - def _list_secrets( - self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: - """Get the details of a single connection, including credentials (if available). - - :param connection_name: Connection Name. Required. - :type connection_name: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. 
- :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.project.models._models.ConnectionsListSecretsResponse - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) - - if body is _Unset: - if ignored is _Unset: - raise TypeError("missing required argument: ignored") - body = {"ignored": ignored} - body = {k: v for k, v in body.items() if v is not None} - content_type = content_type or "application/json" - _content = None - if isinstance(body, (IOBase, bytes)): - _content = body - else: - _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_connections_list_secrets_request( - connection_name=connection_name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access - ) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - -class EvaluationsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.ai.project.AIProjectClient`'s - :attr:`evaluations` attribute. 
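-
- A minimal usage sketch (``project_client`` is an assumed
- :class:`~azure.ai.project.AIProjectClient` instance; the payload fields and
- the ``"id"`` key are illustrative assumptions):
-
- .. code-block:: python
-
-     # Sketch only: create() also accepts an Evaluation model or IO[bytes].
-     evaluations = project_client.evaluations
-     created = evaluations.create(evaluation={"displayName": "demo-eval"})
-     fetched = evaluations.get(id=created["id"])  # "id" assumed present
-     for evaluation in evaluations.list(top=10):
-         print(evaluation)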
- """ - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def get(self, id: str, **kwargs: Any) -> _models.Evaluation: - """Resource read operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - _request = build_evaluations_get_request( - id=id, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, evaluation: _models.Evaluation, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: ~azure.ai.project.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create(self, evaluation: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. 
- - :param evaluation: Evaluation to run. Required. - :type evaluation: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, evaluation: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Required. - :type evaluation: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create(self, evaluation: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any) -> _models.Evaluation: - """Run the evaluation. - - :param evaluation: Evaluation to run. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type evaluation: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(evaluation, (IOBase, bytes)): - _content = evaluation - else: - _content = json.dumps(evaluation, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_request( - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - if _stream: - 
deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> Iterable["_models.Evaluation"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of Evaluation - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.project.models.Evaluation] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.Evaluation]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.Evaluation], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: 
disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @overload - def update( - self, - id: str, - resource: _models.Evaluation, - *, - content_type: str = "application/merge-patch+json", - **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.project.models.Evaluation - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, id: str, resource: JSON, *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def update( - self, id: str, resource: IO[bytes], *, content_type: str = "application/merge-patch+json", **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/merge-patch+json". - :paramtype content_type: str - :return: Evaluation. The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def update( - self, id: str, resource: Union[_models.Evaluation, JSON, IO[bytes]], **kwargs: Any - ) -> _models.Evaluation: - """Resource update operation template. - - :param id: Identifier of the evaluation. Required. - :type id: str - :param resource: The resource instance. Is one of the following types: Evaluation, JSON, - IO[bytes] Required. - :type resource: ~azure.ai.project.models.Evaluation or JSON or IO[bytes] - :return: Evaluation. 
The Evaluation is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Evaluation - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.Evaluation] = kwargs.pop("cls", None) - - content_type = content_type or "application/merge-patch+json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_update_request( - id=id, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.Evaluation, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_schedule(self, name: str, **kwargs: Any) -> _models.EvaluationSchedule: - """Resource read operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :return: EvaluationSchedule. 
The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - _request = build_evaluations_get_schedule_request( - name=name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @overload - def create_or_replace_schedule( - self, name: str, resource: _models.EvaluationSchedule, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. - :type resource: ~azure.ai.project.models.EvaluationSchedule - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_replace_schedule( - self, name: str, resource: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. - :type resource: JSON - :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_replace_schedule( - self, name: str, resource: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Required. - :type resource: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_replace_schedule( - self, name: str, resource: Union[_models.EvaluationSchedule, JSON, IO[bytes]], **kwargs: Any - ) -> _models.EvaluationSchedule: - """Create or replace operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :param resource: The resource instance. Is one of the following types: EvaluationSchedule, - JSON, IO[bytes] Required. - :type resource: ~azure.ai.project.models.EvaluationSchedule or JSON or IO[bytes] - :return: EvaluationSchedule. The EvaluationSchedule is compatible with MutableMapping - :rtype: ~azure.ai.project.models.EvaluationSchedule - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = kwargs.pop("params", {}) or {} - - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.EvaluationSchedule] = kwargs.pop("cls", None) - - content_type = content_type or "application/json" - _content = None - if isinstance(resource, (IOBase, bytes)): - _content = resource - else: - _content = json.dumps(resource, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - - _request = build_evaluations_create_or_replace_schedule_request( - name=name, - content_type=content_type, - api_version=self._config.api_version, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - 
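- # At this point the prepared PUT request has gone through the client's HTTP pipeline - # (retry, authentication, tracing and other policies run inside _pipeline.run above). - # A minimal caller-side sketch of this operation, assuming a hypothetical `project_client` - # and an illustrative schedule payload (names here are assumptions, not part of the API): - # - # schedule = project_client.evaluations.create_or_replace_schedule( - # name="nightly-eval", - # resource={"description": "Nightly evaluation"}, - # ) - # - # Passing stream=True in kwargs returns the raw response bytes instead of a - # deserialized EvaluationSchedule.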
response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.EvaluationSchedule, response.json()) - - if cls: - return cls(pipeline_response, deserialized, response_headers) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list_schedule( - self, *, top: Optional[int] = None, skip: Optional[int] = None, **kwargs: Any - ) -> Iterable["_models.EvaluationSchedule"]: - """Resource list operation template. - - :keyword top: The number of result items to return. Default value is None. - :paramtype top: int - :keyword skip: The number of result items to skip. Default value is None. - :paramtype skip: int - :return: An iterator like instance of EvaluationSchedule - :rtype: ~azure.core.paging.ItemPaged[~azure.ai.project.models.EvaluationSchedule] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - maxpagesize = kwargs.pop("maxpagesize", None) - cls: ClsType[List[_models.EvaluationSchedule]] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - - _request = build_evaluations_list_schedule_request( - top=top, - skip=skip, - maxpagesize=maxpagesize, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url( - "self._config.subscription_id", self._config.subscription_id, "str" - ), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", 
self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - return _request - - def extract_data(pipeline_response): - deserialized = pipeline_response.http_response.json() - list_of_elem = _deserialize(List[_models.EvaluationSchedule], deserialized["value"]) - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return deserialized.get("nextLink") or None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @distributed_trace - def delete_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Resource delete operation template. - - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. - :type name: str - :return: None - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} - - cls: ClsType[None] = kwargs.pop("cls", None) - - _request = build_evaluations_delete_schedule_request( - name=name, - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), - "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), - "resourceGroupName": self._serialize.url( - "self._config.resource_group_name", self._config.resource_group_name, "str" - ), - "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response) - - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore diff --git a/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py b/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py deleted file mode 100644 index 30d23f0ccd59..000000000000 --- a/sdk/ai/azure-ai-project/azure/ai/project/operations/_patch.py +++ /dev/null @@ -1,1982 +0,0 @@ -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -import sys, io, logging, os, time -from io import IOBase -from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast - -# from zoneinfo import ZoneInfo -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ..models._enums import AuthenticationType, ConnectionType -from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse -from .._types import AgentsApiResponseFormatOption -from ..models._patch import ConnectionProperties -from ..models._enums import FilePurpose -from .._vendor import FileType -from .. import models as _models - -from azure.core.tracing.decorator import distributed_trace - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from .. import _types - -JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object -_Unset: Any = object() - -logger = logging.getLogger(__name__) - - -class InferenceOperations: - - def __init__(self, outer_instance): - self.outer_instance = outer_instance - - @distributed_trace - def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": - """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. - The package `azure-ai-inference` must be installed prior to calling this method. - - :return: An authenticated chat completions client - :rtype: ~azure.ai.inference.models.ChatCompletionsClient - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No serverless connection found") - - try: - from azure.ai.inference import ChatCompletionsClient - except ModuleNotFoundError as _: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) - ) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" - ) - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
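- # On the SAS path the credential used below is the SASTokenCredential that - # ConnectionsOperations.get attaches to the connection properties, so no extra - # import is needed here. A minimal caller-side sketch, assuming a hypothetical - # `project_client` whose default Serverless connection is configured: - # - # chat_client = project_client.inference.get_chat_completions_client() - # response = chat_client.complete( - # messages=[{"role": "user", "content": "How many feet are in a mile?"}] - # ) - # print(response.choices[0].message.content)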
- logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" - ) - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": - """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment. - The package `azure-ai-inference` must be installed prior to calling this method. - - :return: An authenticated embeddings client - :rtype: ~azure.ai.inference.models.EmbeddingsClient - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No serverless connection found") - - try: - from azure.ai.inference import EmbeddingsClient - except ModuleNotFoundError as _: - raise ModuleNotFoundError( - "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" - ) - from azure.core.credentials import AzureKeyCredential - - client = EmbeddingsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) - ) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" - ) - client = EmbeddingsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. - logger.debug( - "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" - ) - client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) - else: - raise ValueError("Unknown authentication type") - - return client - - @distributed_trace - def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI": - """Get an authenticated AzureOpenAI client (from the `openai` package) for the default - Azure OpenAI connection. The package `openai` must be installed prior to calling this method. - - :return: An authenticated AzureOpenAI client - :rtype: ~openai.AzureOpenAI - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No Azure OpenAI connection found") - - try: - from openai import AzureOpenAI - except ModuleNotFoundError as _: - raise ModuleNotFoundError("OpenAI SDK is not installed.
Please install it using 'pip install openai'") - - # Pick latest GA version from the "Data plane - Inference" row in the table - # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - AZURE_OPENAI_API_VERSION = "2024-06-01" - - if connection.authentication_type == AuthenticationType.API_KEY: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" - ) - client = AzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION - ) - elif connection.authentication_type == AuthenticationType.AAD: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" - ) - try: - from azure.identity import get_bearer_token_provider - except ModuleNotFoundError as _: - raise ModuleNotFoundError( - "azure.identity package not installed. Please install it using 'pip install azure.identity'" - ) - client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION, - ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") - client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION, - ) - else: - raise ValueError("Unknown authentication type") - - return client - - -class ConnectionsOperations(ConnectionsOperationsGenerated): - - @distributed_trace - def get_default( - self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: - """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. - - :param connection_type: The connection type. Required. - :type connection_type: ~azure.ai.project.models._models.ConnectionType - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool - :return: The connection properties - :rtype: ~azure.ai.project.models._models.ConnectionProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_type: - raise ValueError("You must specify an connection type") - # Since there is no notion of default connection at the moment, list all connections in the category - # and return the first one - connection_properties_list = self.list(connection_type=connection_type, **kwargs) - if len(connection_properties_list) > 0: - if with_credentials: - return self.get( - connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs - ) - else: - return connection_properties_list[0] - else: - return None - - @distributed_trace - def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: - """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. 
- - :param connection_name: Connection Name. Required. - :type connection_name: str - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool - :return: The connection properties - :rtype: ~azure.ai.project.models._models.ConnectionProperties - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - if not connection_name: - raise ValueError("Connection name cannot be empty") - if with_credentials: - connection: ConnectionsListSecretsResponse = self._list_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - if connection.properties.auth_type == AuthenticationType.AAD: - return ConnectionProperties(connection=connection, token_credential=self._config.credential) - elif connection.properties.auth_type == AuthenticationType.SAS: - from ..models._patch import SASTokenCredential - - token_credential = SASTokenCredential( - sas_token=connection.properties.credentials.sas, - credential=self._config.credential, - subscription_id=self._config.subscription_id, - resource_group_name=self._config.resource_group_name, - project_name=self._config.project_name, - connection_name=connection_name, - ) - return ConnectionProperties(connection=connection, token_credential=token_credential) - - return ConnectionProperties(connection=connection) - else: - return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs)) - - @distributed_trace - def list(self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any) -> Iterable[ConnectionProperties]: - """List the properties of all connections, or all connections of a certain connection type. - - :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. - If not provided, all connections are listed. - :type connection_type: ~azure.ai.project.models._models.ConnectionType - :return: A list of connection properties - :rtype: Iterable[~azure.ai.project.models._models.ConnectionProperties] - :raises ~azure.core.exceptions.HttpResponseError: - """ - kwargs.setdefault("merge_span", True) - connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs) - - # Iterate to create the simplified result property - connection_properties_list: List[ConnectionProperties] = [] - for connection in connections_list.value: - connection_properties_list.append(ConnectionProperties(connection=connection)) - - return connection_properties_list - - -class AgentsOperations(AgentsOperationsGenerated): - @overload - def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent.
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword tools: The collection of tools to enable for the new agent. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword tool_resources: A set of resources that are used by the agent's tools. The resources - are specific to the type of tool. For example, the ``code_interpreter`` - tool requires a list of file IDs, while the ``file_search`` tool requires a list of vector - store IDs. Default value is None. - :paramtype tool_resources: ~azure.ai.project.models.ToolResources - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent( - self, - *, - model: str, - content_type: str = "application/json", - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.Agent: - """Creates a new agent. - - :keyword model: The ID of the model to use. Required. - :paramtype model: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword name: The name of the new agent. Default value is None. - :paramtype name: str - :keyword description: The description of the new agent. Default value is None. - :paramtype description: str - :keyword instructions: The system instructions for the new agent to use. Default value is None. - :paramtype instructions: str - :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). Default value is None. - :paramtype toolset: ~azure.ai.project.models.ToolSet - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output more random, - while lower values like 0.2 will make it more focused and deterministic. Default value is - None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. - So 0.1 means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword response_format: The response format of the tool calls used by this agent. Is one of - the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: Agent. The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_agent(self, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: - """Creates a new agent. - - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: Agent. 
The Agent is compatible with MutableMapping - :rtype: ~azure.ai.project.models.Agent - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_agent( - self, - body: Union[JSON, IO[bytes]] = _Unset, - *, - model: str = _Unset, - name: Optional[str] = None, - description: Optional[str] = None, - instructions: Optional[str] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - content_type: str = "application/json", - **kwargs: Any, - ) -> _models.Agent: - """ - Creates a new agent with various configurations, delegating to the generated operations. - - :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` - and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. - :return: An Agent object. - :raises: HttpResponseError for HTTP errors. - """ - if body is not _Unset: - if isinstance(body, IOBase): - return super().create_agent(body=body, content_type=content_type, **kwargs) - return super().create_agent(body=body, **kwargs) - - if toolset is not None: - self._toolset = toolset - tools = toolset.definitions - tool_resources = toolset.resources - - return super().create_agent( - model=model, - name=name, - description=description, - instructions=instructions, - tools=tools, - tool_resources=tool_resources, - temperature=temperature, - top_p=top_p, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - def get_toolset(self) -> Optional[_models.ToolSet]: - """ - Get the toolset for the agent. - - :return: The toolset for the agent. If not set, returns None. - :rtype: ~azure.ai.project.models.ToolSet - """ - if hasattr(self, "_toolset"): - return self._toolset - return None - - @overload - def create_run( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, - thread_id: str, - *, - assistant_id: str, - content_type: str = "application/json", - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. 
The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_run( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_run( - self, - thread_id: str, - body: Union[JSON, IO[bytes]] = _Unset, - *, - assistant_id: str = _Unset, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread. - - :param thread_id: Required. - :type thread_id: str - :param body: Is either a JSON type or a IO[bytes] type. Required. - :type body: JSON or IO[bytes] - :keyword assistant_id: The ID of the agent that should run the thread. 
Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. Default - value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run the - thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. - :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context windows - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. 
- :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode - or ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=False, - stream=False, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - # If streaming is enabled, return the custom stream object - return response - - @distributed_trace - def create_and_process_run( - self, - thread_id: str, - assistant_id: str, - model: Optional[str] = None, - instructions: Optional[str] = None, - additional_instructions: Optional[str] = None, - additional_messages: Optional[List[_models.ThreadMessage]] = None, - tools: Optional[List[_models.ToolDefinition]] = None, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - truncation_strategy: Optional[_models.TruncationObject] = None, - tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None, - response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - metadata: Optional[Dict[str, str]] = None, - sleep_interval: int = 1, - **kwargs: Any, - ) -> _models.ThreadRun: - """Creates a new run for an agent thread and processes the run. - - :param thread_id: Required. - :type thread_id: str - :keyword assistant_id: The ID of the agent that should run the thread. Required. - :paramtype assistant_id: str - :keyword model: The overridden model name that the agent should use to run the thread. - Default value is None. - :paramtype model: str - :keyword instructions: The overridden system instructions that the agent should use to run - the thread. Default value is None. - :paramtype instructions: str - :keyword additional_instructions: Additional instructions to append at the end of the - instructions for the run. This is useful for modifying the behavior - on a per-run basis without overriding other instructions. Default value is None. 
- :paramtype additional_instructions: str - :keyword additional_messages: Adds additional messages to the thread before creating the run. - Default value is None. - :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage] - :keyword tools: The overridden list of enabled tools that the agent should use to run the - thread. Default value is None. - :paramtype tools: list[~azure.ai.project.models.ToolDefinition] - :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 - will make the output - more random, while lower values like 0.2 will make it more focused and deterministic. Default - value is None. - :paramtype temperature: float - :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model - considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens - comprising the top 10% probability mass are considered. - - We generally recommend altering this or temperature but not both. Default value is None. - :paramtype top_p: float - :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the - course of the run. The run will make a best effort to use only - the number of prompt tokens specified, across multiple turns of the run. If the run exceeds - the number of prompt tokens specified, - the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default - value is None. - :paramtype max_prompt_tokens: int - :keyword max_completion_tokens: The maximum number of completion tokens that may be used over - the course of the run. The run will make a best effort - to use only the number of completion tokens specified, across multiple turns of the run. If - the run exceeds the number of - completion tokens specified, the run will end with status ``incomplete``. See - ``incomplete_details`` for more info. Default value is None. - :paramtype max_completion_tokens: int - :keyword truncation_strategy: The strategy to use for dropping messages as the context window - moves forward. Default value is None. - :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject - :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of - the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"], - AgentsNamedToolChoice Default value is None. - :paramtype tool_choice: str or str or - ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or - ~azure.ai.project.models.AgentsNamedToolChoice - :keyword response_format: Specifies the format that the model must output. Is one of the - following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], - AgentsApiResponseFormat Default value is None. - :paramtype response_format: str or str or - ~azure.ai.project.models.AgentsApiResponseFormatMode or - ~azure.ai.project.models.AgentsApiResponseFormat - :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used - for storing additional information about that object in a structured format. Keys may be up to - 64 characters in length and values may be up to 512 characters in length. Default value is - None. - :paramtype metadata: dict[str, str] - :keyword sleep_interval: The time in seconds to wait between polling the service for run status. - Default value is 1. - :paramtype sleep_interval: int - :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
- # Create and initiate the run with additional parameters
- run = self.create_run(
- thread_id=thread_id,
- assistant_id=assistant_id,
- model=model,
- instructions=instructions,
- additional_instructions=additional_instructions,
- additional_messages=additional_messages,
- tools=tools,
- temperature=temperature,
- top_p=top_p,
- max_prompt_tokens=max_prompt_tokens,
- max_completion_tokens=max_completion_tokens,
- truncation_strategy=truncation_strategy,
- tool_choice=tool_choice,
- response_format=response_format,
- metadata=metadata,
- **kwargs,
- )
-
- # Monitor and process the run status
- while run.status in ["queued", "in_progress", "requires_action"]:
- time.sleep(sleep_interval)
- run = self.get_run(thread_id=thread_id, run_id=run.id)
-
- if run.status == "requires_action" and isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logging.warning("No tool calls provided - cancelling run")
- self.cancel_run(thread_id=thread_id, run_id=run.id)
- break
-
- toolset = self.get_toolset()
- if toolset:
- tool_outputs = toolset.execute_tool_calls(tool_calls)
- else:
- raise ValueError("Toolset is not available in the client.")
-
- logging.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- self.submit_tool_outputs_to_run(thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs)
-
- logging.info("Current run status: %s", run.status)
-
- return run
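-
- # Illustrative usage sketch for the polling helper above; `project_client`,
- # `thread` and `agent` are placeholders for an already-constructed
- # AIProjectClient and previously created resources:
- #
- #     run = project_client.agents.create_and_process_run(
- #         thread_id=thread.id, assistant_id=agent.id, sleep_interval=2
- #     )
- #     print(run.status)  # a terminal status such as "completed" or "failed"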
- @overload
- def create_stream(
- self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self,
- thread_id: str,
- *,
- assistant_id: str,
- content_type: str = "application/json",
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread.
-
- :param thread_id: Required.
- :type thread_id: str
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_stream(
- self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def create_stream(
- self,
- thread_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- assistant_id: str = _Unset,
- model: Optional[str] = None,
- instructions: Optional[str] = None,
- additional_instructions: Optional[str] = None,
- additional_messages: Optional[List[_models.ThreadMessage]] = None,
- tools: Optional[List[_models.ToolDefinition]] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- max_prompt_tokens: Optional[int] = None,
- max_completion_tokens: Optional[int] = None,
- truncation_strategy: Optional[_models.TruncationObject] = None,
- tool_choice: Optional["_types.AgentsApiToolChoiceOption"] = None,
- response_format: Optional["_types.AgentsApiResponseFormatOption"] = None,
- metadata: Optional[Dict[str, str]] = None,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Creates a new stream for an agent thread, terminating when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword assistant_id: The ID of the agent that should run the thread. Required.
- :paramtype assistant_id: str
- :keyword model: The overridden model name that the agent should use to run the thread. Default
- value is None.
- :paramtype model: str
- :keyword instructions: The overridden system instructions that the agent should use to run the
- thread. Default value is None.
- :paramtype instructions: str
- :keyword additional_instructions: Additional instructions to append at the end of the
- instructions for the run. This is useful for modifying the behavior
- on a per-run basis without overriding other instructions. Default value is None.
- :paramtype additional_instructions: str
- :keyword additional_messages: Adds additional messages to the thread before creating the run.
- Default value is None.
- :paramtype additional_messages: list[~azure.ai.project.models.ThreadMessage]
- :keyword tools: The overridden list of enabled tools that the agent should use to run the
- thread. Default value is None.
- :paramtype tools: list[~azure.ai.project.models.ToolDefinition]
- :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8
- will make the output
- more random, while lower values like 0.2 will make it more focused and deterministic. Default
- value is None.
- :paramtype temperature: float
- :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the
- model
- considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
- comprising the top 10% probability mass are considered.
-
- We generally recommend altering this or temperature but not both. Default value is None.
- :paramtype top_p: float
- :keyword max_prompt_tokens: The maximum number of prompt tokens that may be used over the
- course of the run. The run will make a best effort to use only
- the number of prompt tokens specified, across multiple turns of the run. If the run exceeds
- the number of prompt tokens specified,
- the run will end with status ``incomplete``. See ``incomplete_details`` for more info. Default
- value is None.
- :paramtype max_prompt_tokens: int
- :keyword max_completion_tokens: The maximum number of completion tokens that may be used over
- the course of the run. The run will make a best effort
- to use only the number of completion tokens specified, across multiple turns of the run. If
- the run exceeds the number of
- completion tokens specified, the run will end with status ``incomplete``. See
- ``incomplete_details`` for more info. Default value is None.
- :paramtype max_completion_tokens: int
- :keyword truncation_strategy: The strategy to use for dropping messages as the context window
- moves forward. Default value is None.
- :paramtype truncation_strategy: ~azure.ai.project.models.TruncationObject
- :keyword tool_choice: Controls whether or not and which tool is called by the model. Is one of
- the following types: str, Union[str, "_models.AgentsApiToolChoiceOptionMode"],
- AgentsNamedToolChoice Default value is None.
- :paramtype tool_choice: str or str or ~azure.ai.project.models.AgentsApiToolChoiceOptionMode or
- ~azure.ai.project.models.AgentsNamedToolChoice
- :keyword response_format: Specifies the format that the model must output. Is one of the
- following types: str, Union[str, "_models.AgentsApiResponseFormatMode"],
- AgentsApiResponseFormat Default value is None.
- :paramtype response_format: str or str or ~azure.ai.project.models.AgentsApiResponseFormatMode
- or ~azure.ai.project.models.AgentsApiResponseFormat
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - - if isinstance(body, dict): # Handle overload with JSON body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - elif assistant_id is not _Unset: # Handle overload with keyword arguments. - response = super().create_run( - thread_id, - assistant_id=assistant_id, - model=model, - instructions=instructions, - additional_instructions=additional_instructions, - additional_messages=additional_messages, - tools=tools, - stream_parameter=True, - stream=True, - temperature=temperature, - top_p=top_p, - max_prompt_tokens=max_prompt_tokens, - max_completion_tokens=max_completion_tokens, - truncation_strategy=truncation_strategy, - tool_choice=tool_choice, - response_format=response_format, - metadata=metadata, - **kwargs, - ) - - elif isinstance(body, io.IOBase): # Handle overload with binary body. - content_type = kwargs.get("content_type", "application/json") - response = super().create_run(thread_id, body, content_type=content_type, **kwargs) - - else: - raise ValueError("Invalid combination of arguments provided.") - - response_iterator: Iterator[bytes] = cast(Iterator[bytes], response) - - return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler) - - @overload - def submit_tool_outputs_to_run( - self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: ThreadRun. The ThreadRun is compatible with MutableMapping - :rtype: ~azure.ai.project.models.ThreadRun - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def submit_tool_outputs_to_run( - self, - thread_id: str, - run_id: str, - *, - tool_outputs: List[_models.ToolOutput], - content_type: str = "application/json", - event_handler: Optional[_models.AgentEventHandler] = None, - **kwargs: Any, - ) -> _models.ThreadRun: - """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool - outputs will have a status of 'requires_action' with a required_action.type of - 'submit_tool_outputs'. - - :param thread_id: Required. - :type thread_id: str - :param run_id: Required. - :type run_id: str - :keyword tool_outputs: Required. - :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput] - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :keyword event_handler: The event handler to use for processing events during the run. Default - value is None. - :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler - :return: ThreadRun. 
The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_run(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def submit_tool_outputs_to_run(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.ThreadRun:
- """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler
- :param kwargs: Additional parameters.
- :return: ThreadRun. The ThreadRun is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.ThreadRun
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=False, stream=False, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # This method always requests a non-streaming run, so the response is a ThreadRun model
- return response
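-
- # Illustrative sketch of answering a run in "requires_action" state with this
- # method; names are placeholders and the output value stands in for real tool
- # results:
- #
- #     if isinstance(run.required_action, _models.SubmitToolOutputsAction):
- #         calls = run.required_action.submit_tool_outputs.tool_calls
- #         outputs = [{"tool_call_id": call.id, "output": "42"} for call in calls]
- #         run = project_client.agents.submit_tool_outputs_to_run(
- #             thread_id=run.thread_id, run_id=run.id, tool_outputs=outputs
- #         )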
- @overload
- def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- *,
- tool_outputs: List[_models.ToolOutput],
- content_type: str = "application/json",
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def submit_tool_outputs_to_stream(
- self, thread_id: str, run_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def submit_tool_outputs_to_stream(
- self,
- thread_id: str,
- run_id: str,
- body: Union[JSON, IO[bytes]] = _Unset,
- *,
- tool_outputs: List[_models.ToolOutput] = _Unset,
- event_handler: Optional[_models.AgentEventHandler] = None,
- **kwargs: Any,
- ) -> _models.AgentRunStream:
- """Submits outputs from tools as requested by tool calls in a stream. Runs that need submitted tool
- outputs will have a status of 'requires_action' with a required_action.type of
- 'submit_tool_outputs'. The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message.
-
- :param thread_id: Required.
- :type thread_id: str
- :param run_id: Required.
- :type run_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Required.
- :type body: JSON or IO[bytes]
- :keyword tool_outputs: Required.
- :paramtype tool_outputs: list[~azure.ai.project.models.ToolOutput]
- :keyword event_handler: The event handler to use for processing events during the run. Default
- value is None.
- :paramtype event_handler: ~azure.ai.project.models.AgentEventHandler
- :param kwargs: Additional parameters.
- :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming.
- :rtype: ~azure.ai.project.models.AgentRunStream
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if isinstance(body, dict):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- elif tool_outputs is not _Unset:
- response = super().submit_tool_outputs_to_run(
- thread_id, run_id, tool_outputs=tool_outputs, stream_parameter=True, stream=True, **kwargs
- )
-
- elif isinstance(body, io.IOBase):
- content_type = kwargs.get("content_type", "application/json")
- response = super().submit_tool_outputs_to_run(thread_id, run_id, body, content_type=content_type, **kwargs)
-
- else:
- raise ValueError("Invalid combination of arguments provided.")
-
- # Cast the response to Iterator[bytes] for type correctness
- response_iterator: Iterator[bytes] = cast(Iterator[bytes], response)
-
- return _models.AgentRunStream(response_iterator, self._handle_submit_tool_outputs, event_handler)
-
- def _handle_submit_tool_outputs(
- self, run: _models.ThreadRun, event_handler: Optional[_models.AgentEventHandler] = None
- ) -> None:
- if isinstance(run.required_action, _models.SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- logger.debug("No tool calls to execute.")
- return
-
- toolset = self.get_toolset()
- if toolset:
- tool_outputs = toolset.execute_tool_calls(tool_calls)
- else:
- logger.warning("Toolset is not available in the client.")
- return
-
- logger.info("Tool outputs: %s", tool_outputs)
- if tool_outputs:
- with self.submit_tool_outputs_to_stream(
- thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler
- ) as stream:
- stream.until_done()
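-
- # Illustrative sketch of consuming a stream; any tool calls raised by the
- # service are resolved through `_handle_submit_tool_outputs` above (names are
- # placeholders, assuming a toolset was registered on the client):
- #
- #     with project_client.agents.create_stream(
- #         thread_id=thread.id, assistant_id=agent.id
- #     ) as stream:
- #         stream.until_done()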
- @overload
- def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :param body: Required.
- :type body: JSON
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def upload_file(
- self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :keyword file: Required.
- :paramtype file: ~azure.ai.project._vendor.FileType
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
- :keyword filename: Default value is None.
- :paramtype filename: str
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def upload_file(
- self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :param file_path: Required.
- :type file_path: str
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def upload_file(
- self,
- body: Optional[JSON] = None,
- *,
- file: Optional[FileType] = None,
- file_path: Optional[str] = None,
- purpose: Union[str, _models.FilePurpose, None] = None,
- filename: Optional[str] = None,
- **kwargs: Any,
- ) -> _models.OpenAIFile:
- """
- Uploads a file for use by other operations, delegating to the generated operations.
-
- :param body: JSON body of the request. Required if neither `file` nor `file_path` is provided.
- :param file: File content. Required if neither `body` nor `file_path` is provided.
- :param file_path: Path to the file. Required if neither `body` nor `file` is provided.
- :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required whenever `file` or `file_path` is provided.
- :param filename: The name of the file.
- :param kwargs: Additional parameters.
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :raises FileNotFoundError: If the file_path is invalid.
- :raises IOError: If there are issues with reading the file.
- :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors returned by the service.
- """
- if body is not None:
- return super().upload_file(body=body, **kwargs)
-
- if isinstance(purpose, FilePurpose):
- purpose = purpose.value
-
- if file is not None and purpose is not None:
- return super().upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
-
- if file_path is not None and purpose is not None:
- if not os.path.isfile(file_path):
- raise FileNotFoundError(f"The file path provided does not exist: {file_path}")
-
- try:
- with open(file_path, "rb") as f:
- content = f.read()
-
- # Determine filename and create correct FileType
- base_filename = filename or os.path.basename(file_path)
- file_content: FileType = (base_filename, content)
-
- return super().upload_file(file=file_content, purpose=purpose, **kwargs)
- except IOError as e:
- raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") from e
-
- raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.")
-
- @overload
- def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :param body: Required.
- :type body: JSON
- :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def upload_file_and_poll(
- self,
- *,
- file: FileType,
- purpose: Union[str, _models.FilePurpose],
- filename: Optional[str] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :keyword file: Required.
- :paramtype file: ~azure.ai.project._vendor.FileType
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
- :keyword filename: Default value is None.
- :paramtype filename: str
- :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def upload_file_and_poll(
- self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any
- ) -> _models.OpenAIFile:
- """Uploads a file for use by other operations.
-
- :param file_path: Required.
- :type file_path: str
- :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required.
- :paramtype purpose: str or ~azure.ai.project.models.FilePurpose
- :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.OpenAIFile
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def upload_file_and_poll(
- self,
- body: Optional[JSON] = None,
- *,
- file: Optional[FileType] = None,
- file_path: Optional[str] = None,
- purpose: Union[str, _models.FilePurpose, None] = None,
- filename: Optional[str] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.OpenAIFile:
- """
- Uploads a file for use by other operations, delegating to the generated operations.
-
- :param body: JSON body of the request. Required if neither `file` nor `file_path` is provided.
- :param file: File content. Required if neither `body` nor `file_path` is provided.
- :param file_path: Path to the file. Required if neither `body` nor `file` is provided.
- :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants",
- "assistants_output", "batch", "batch_output", and "vision". Required whenever `file` or `file_path` is provided.
- :param filename: The name of the file.
- :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value
- is 1.
- :paramtype sleep_interval: float
- :param kwargs: Additional parameters.
- :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping
- :raises FileNotFoundError: If the file_path is invalid.
- :raises IOError: If there are issues with reading the file.
- :raises ~azure.core.exceptions.HttpResponseError: For HTTP errors returned by the service.
- """
- if body is not None:
- uploaded_file = self.upload_file(body=body, **kwargs)
- elif file is not None and purpose is not None:
- uploaded_file = self.upload_file(file=file, purpose=purpose, filename=filename, **kwargs)
- elif file_path is not None and purpose is not None:
- uploaded_file = self.upload_file(file_path=file_path, purpose=purpose, **kwargs)
- else:
- raise ValueError(
- "Invalid parameters for upload_file_and_poll. Please provide either 'body', "
- "or both 'file' and 'purpose', or both 'file_path' and 'purpose'."
- )
-
- while uploaded_file.status in ["uploaded", "pending", "running"]:
- time.sleep(sleep_interval)
- uploaded_file = self.get_file(uploaded_file.id)
-
- return uploaded_file
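-
- # Illustrative only; the path is a placeholder and "assistants" is one of the
- # documented purpose values:
- #
- #     uploaded_file = project_client.agents.upload_file_and_poll(
- #         file_path="./product_info.md", purpose="assistants"
- #     )
- #     print(uploaded_file.id, uploaded_file.status)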
- @overload
- def create_vector_store_and_poll(
- self, body: JSON, *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
- ) -> _models.VectorStore:
- """Creates a vector store and polls until it is ready.
-
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStore. The VectorStore is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStore
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_vector_store_and_poll(
- self,
- *,
- content_type: str = "application/json",
- file_ids: Optional[List[str]] = None,
- name: Optional[str] = None,
- expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
- chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
- metadata: Optional[Dict[str, str]] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStore:
- """Creates a vector store and polls until it is ready.
-
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
- ``file_search`` that can access files. Default value is None.
- :paramtype file_ids: list[str]
- :keyword name: The name of the vector store. Default value is None.
- :paramtype name: str
- :keyword expires_after: Details on when this vector store expires. Default value is None.
- :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
- :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
- use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
- :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStore. The VectorStore is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStore
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_vector_store_and_poll(
- self, body: IO[bytes], *, content_type: str = "application/json", sleep_interval: float = 1, **kwargs: Any
- ) -> _models.VectorStore:
- """Creates a vector store and polls until it is ready.
-
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStore. The VectorStore is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStore
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def create_vector_store_and_poll(
- self,
- body: Union[JSON, IO[bytes], None] = None,
- *,
- content_type: str = "application/json",
- file_ids: Optional[List[str]] = None,
- name: Optional[str] = None,
- expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
- chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
- metadata: Optional[Dict[str, str]] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStore:
- """Creates a vector store and polls until it is ready.
-
- :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
- :keyword content_type: Body Parameter content-type. Default value is "application/json".
- :paramtype content_type: str
- :keyword file_ids: A list of file IDs that the vector store should use. Useful for tools like
- ``file_search`` that can access files. Default value is None.
- :paramtype file_ids: list[str]
- :keyword name: The name of the vector store. Default value is None.
- :paramtype name: str
- :keyword expires_after: Details on when this vector store expires. Default value is None.
- :paramtype expires_after: ~azure.ai.project.models.VectorStoreExpirationPolicy
- :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
- use the auto strategy. Only applicable if file_ids is non-empty. Default value is None.
- :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
- :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
- for storing additional information about that object in a structured format. Keys may be up to
- 64 characters in length and values may be up to 512 characters in length. Default value is
- None.
- :paramtype metadata: dict[str, str]
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStore. The VectorStore is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStore
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if body is not None:
- vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs)
- elif file_ids is not None or (name is not None and expires_after is not None):
- vector_store = self.create_vector_store(
- content_type=content_type,
- file_ids=file_ids,
- name=name,
- expires_after=expires_after,
- chunking_strategy=chunking_strategy,
- metadata=metadata,
- **kwargs,
- )
- else:
- raise ValueError(
- "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
- "'file_ids', or 'name' and 'expires_after'."
- )
-
- while vector_store.status == "in_progress":
- time.sleep(sleep_interval)
- vector_store = self.get_vector_store(vector_store.id)
-
- return vector_store
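-
- # Illustrative only; the file ID is assumed to come from a prior upload:
- #
- #     vector_store = project_client.agents.create_vector_store_and_poll(
- #         file_ids=[uploaded_file.id], name="my_vectorstore"
- #     )
- #     print(f"Vector store {vector_store.id} is {vector_store.status}")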
- @overload
- def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- body: JSON,
- *,
- content_type: str = "application/json",
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :param body: Required.
- :type body: JSON
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- *,
- file_ids: List[str],
- content_type: str = "application/json",
- chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :keyword file_ids: List of file identifiers. Required.
- :paramtype file_ids: list[str]
- :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
- use the auto strategy. Default value is None.
- :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @overload
- def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- body: IO[bytes],
- *,
- content_type: str = "application/json",
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :param body: Required.
- :type body: IO[bytes]
- :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
- Default value is "application/json".
- :paramtype content_type: str
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- @distributed_trace
- def create_vector_store_file_batch_and_poll(
- self,
- vector_store_id: str,
- body: Optional[Union[JSON, IO[bytes]]] = None,
- *,
- file_ids: List[str] = _Unset,
- chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
- sleep_interval: float = 1,
- **kwargs: Any,
- ) -> _models.VectorStoreFileBatch:
- """Create a vector store file batch and poll.
-
- :param vector_store_id: Identifier of the vector store. Required.
- :type vector_store_id: str
- :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
- :type body: JSON or IO[bytes]
- :keyword file_ids: List of file identifiers. Required.
- :paramtype file_ids: list[str]
- :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
- use the auto strategy. Default value is None.
- :paramtype chunking_strategy: ~azure.ai.project.models.VectorStoreChunkingStrategyRequest
- :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
- is 1.
- :paramtype sleep_interval: float
- :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
- :rtype: ~azure.ai.project.models.VectorStoreFileBatch
- :raises ~azure.core.exceptions.HttpResponseError:
- """
-
- if body is None:
- vector_store_file_batch = super().create_vector_store_file_batch(
- vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
- )
- else:
- content_type = kwargs.get("content_type", "application/json")
- vector_store_file_batch = super().create_vector_store_file_batch(
- body=body, content_type=content_type, **kwargs
- )
-
- while vector_store_file_batch.status == "in_progress":
- time.sleep(sleep_interval)
- vector_store_file_batch = super().get_vector_store_file_batch(
- vector_store_id=vector_store_id, batch_id=vector_store_file_batch.id
- )
-
- return vector_store_file_batch
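-
- # Illustrative only; IDs are placeholders carried over from the sketches above:
- #
- #     batch = project_client.agents.create_vector_store_file_batch_and_poll(
- #         vector_store_id=vector_store.id, file_ids=[uploaded_file.id]
- #     )
- #     print(f"File batch status: {batch.status}")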
-
-
-__all__: List[str] = [
- "AgentsOperations",
- "ConnectionsOperations",
- "InferenceOperations",
-]  # Add all objects you want publicly available to users at this package level
-
-
-def patch_sdk():
- """Do not remove from this file.
-
- `patch_sdk` is a last resort escape hatch that allows you to do customizations
- you can't accomplish using the techniques described in
- https://aka.ms/azsdk/python/dpcodegen/python/customize
- """
diff --git a/sdk/ai/azure-ai-project/azure/ai/project/py.typed b/sdk/ai/azure-ai-project/azure/ai/project/py.typed
deleted file mode 100644
index e5aff4f83af8..000000000000
--- a/sdk/ai/azure-ai-project/azure/ai/project/py.typed
+++ /dev/null
@@ -1 +0,0 @@
-# Marker file for PEP 561.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/dev_requirements.txt b/sdk/ai/azure-ai-project/dev_requirements.txt
deleted file mode 100644
index c82827bb56f4..000000000000
--- a/sdk/ai/azure-ai-project/dev_requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
--e ../../../tools/azure-sdk-tools
-../../core/azure-core
-../../identity/azure-identity
-aiohttp
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py
deleted file mode 100644
index bcc25e36490b..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_basics_async.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_basics_async.py
-
-DESCRIPTION:
- This sample demonstrates how to use basic agent operations from
- the Azure Agents service using an asynchronous client.
-
-USAGE:
- python sample_agents_basics_async.py
-
- Before running the sample:
-
- pip install azure-ai-project azure-identity
-
- Set this environment variable with your own value:
- PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.project.aio import AIProjectClient
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-async def main():
-
- # Create an Azure AI Client from a connection string, copied from your AI Studio project.
- # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
- # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
- project_client = AIProjectClient.from_connection_string(
- credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
- )
-
- async with project_client:
- agent = await project_client.agents.create_agent(
- model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
- )
- print(f"Created agent, agent ID: {agent.id}")
-
- thread = await project_client.agents.create_thread()
- print(f"Created thread, thread ID: {thread.id}")
-
- message = await project_client.agents.create_message(
- thread_id=thread.id, role="user", content="Hello, tell me a joke"
- )
- print(f"Created message, message ID: {message.id}")
-
- run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-
- # poll the run as long as run status is queued or in progress
- while run.status in ["queued", "in_progress", "requires_action"]:
- # wait for a second without blocking the event loop
- await asyncio.sleep(1)
- run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
- print(f"Run status: {run.status}")
-
- print(f"Run completed with status: {run.status}")
-
- await project_client.agents.delete_agent(agent.id)
- print("Deleted agent")
-
- messages = await project_client.agents.list_messages(thread_id=thread.id)
- print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py
deleted file mode 100644
index 75a37b873be3..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_functions_async.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_functions_async.py
-
-DESCRIPTION:
- This sample demonstrates how to use agent operations with custom functions from
- the Azure Agents service using an asynchronous client.
-
-USAGE:
- python sample_agents_functions_async.py
-
- Before running the sample:
-
- pip install azure-ai-project azure-identity
-
- Set this environment variable with your own value:
- PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.project.aio import AIProjectClient
-from azure.ai.project.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
-from azure.identity import DefaultAzureCredential
-
-import os
-
-from user_async_functions import user_async_functions
-
-
-async def main():
- # Create an Azure AI Client from a connection string, copied from your AI Studio project.
- # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
- # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
- project_client = AIProjectClient.from_connection_string(
- credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
- )
-
- async with project_client:
- # Initialize assistant functions
- functions = AsyncFunctionTool(functions=user_async_functions)
-
- # Create agent
- agent = await project_client.agents.create_agent(
- model="gpt-4-1106-preview",
- name="my-assistant",
- instructions="You are a helpful assistant",
- tools=functions.definitions,
- )
- print(f"Created agent, agent ID: {agent.id}")
-
- # Create thread for communication
- thread = await project_client.agents.create_thread()
- print(f"Created thread, ID: {thread.id}")
-
- # Create and send message
- message = await project_client.agents.create_message(
- thread_id=thread.id, role="user", content="Hello, what's the time?"
- )
- print(f"Created message, ID: {message.id}")
-
- # Create and run assistant task
- run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
- print(f"Created run, ID: {run.id}")
-
- # Polling loop for run status
- while run.status in ["queued", "in_progress", "requires_action"]:
- await asyncio.sleep(4)
- run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
- if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
- tool_calls = run.required_action.submit_tool_outputs.tool_calls
- if not tool_calls:
- print("No tool calls provided - cancelling run")
- await project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
- break
-
- tool_outputs = []
- for tool_call in tool_calls:
- if isinstance(tool_call, RequiredFunctionToolCall):
- try:
- output = await functions.execute(tool_call)
- tool_outputs.append(
- {
- "tool_call_id": tool_call.id,
- "output": output,
- }
- )
- except Exception as e:
- print(f"Error executing tool_call {tool_call.id}: {e}")
-
- print(f"Tool outputs: {tool_outputs}")
- if tool_outputs:
- await project_client.agents.submit_tool_outputs_to_run(
- thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
- )
-
- print(f"Current run status: {run.status}")
-
- print(f"Run completed with status: {run.status}")
-
- # Delete the agent when done
- await project_client.agents.delete_agent(agent.id)
- print("Deleted agent")
-
- # Fetch and log all messages
- messages = await project_client.agents.list_messages(thread_id=thread.id)
- print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
deleted file mode 100644
index f134ae144221..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_eventhandler_async.py
-
-DESCRIPTION:
- This sample demonstrates how to use agent operations with an event handler in streaming from
- -USAGE: - python sample_agents_stream_eventhandler_async.py - - Before running the sample: - - pip install azure.ai.project azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" -import asyncio -from typing import Any - -from azure.ai.project.aio import AIProjectClient -from azure.ai.project.models._models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.project.models._patch import AsyncAgentEventHandler -from azure.identity import DefaultAzureCredential - -import os - - -class MyEventHandler(AsyncAgentEventHandler): - async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - async def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - async def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - async def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - async def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - async def on_done(self) -> None: - print("Stream completed.") - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -async def main(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
- # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
- # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
-
- project_client = AIProjectClient.from_connection_string(
- credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
- )
-
- async with project_client:
- agent = await project_client.agents.create_agent(
- model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
- )
- print(f"Created agent, agent ID: {agent.id}")
-
- thread = await project_client.agents.create_thread()
- print(f"Created thread, thread ID {thread.id}")
-
- message = await project_client.agents.create_message(
- thread_id=thread.id, role="user", content="Hello, tell me a joke"
- )
- print(f"Created message, message ID {message.id}")
-
- async with await project_client.agents.create_stream(
- thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
- ) as stream:
- await stream.until_done()
-
- await project_client.agents.delete_agent(agent.id)
- print("Deleted agent")
-
- messages = await project_client.agents.list_messages(thread_id=thread.id)
- print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
deleted file mode 100644
index 505bc439c25e..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_eventhandler_with_toolset_async.py
-
-DESCRIPTION:
- This sample demonstrates how to use agent operations with an event handler and toolset from
- the Azure Agents service using an asynchronous client.
-
-USAGE:
- python sample_agents_stream_eventhandler_with_toolset_async.py
-
- Before running the sample:
-
- pip install azure-ai-project azure-identity
-
- Set this environment variable with your own value:
- PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-from typing import Any
-
-from azure.ai.project.aio import AIProjectClient
-from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
-from azure.ai.project.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet
-from azure.identity import DefaultAzureCredential
-
-import os
-
-from user_async_functions import user_async_functions
-
-
-class MyEventHandler(AsyncAgentEventHandler):
-
- async def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
- for content_part in delta.delta.content:
- if isinstance(content_part, MessageDeltaTextContent):
- text_value = content_part.text.value if content_part.text else "No text"
- print(f"Text delta received: {text_value}")
-
- async def on_thread_message(self, message: "ThreadMessage") -> None:
- print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
-
- async def on_thread_run(self, run: "ThreadRun") -> None:
- print(f"ThreadRun status: {run.status}")
-
- if run.status == "failed":
- print(f"Run failed. Error: {run.last_error}")
Error: {run.last_error}") - - async def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - async def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - async def on_done(self) -> None: - print("Stream completed.") - - async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -async def main(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. - # At the moment, it should be in the format ";;;" - # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - - project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) - - # Initialize toolset with user functions - functions = AsyncFunctionTool(user_async_functions) - toolset = AsyncToolSet() - toolset.add(functions) - - async with project_client: - - agent = await project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant", toolset=toolset - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await project_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = await project_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? Also let me know the details", - ) - print(f"Created message, message ID {message.id}") - - async with await project_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() - ) as stream: - await stream.until_done() - - await project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = await project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py deleted file mode 100644 index f3ad6ca178c1..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ /dev/null @@ -1,92 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_iteration_async.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with interation in streaming from - the Azure Agents service using a asynchronous client. - -USAGE: - python sample_agents_stream_iteration_async.py - - Before running the sample: - - pip install azure.ai.project azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" -import asyncio - -from azure.ai.project.aio import AIProjectClient -from azure.ai.project.models import AgentStreamEvent -from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.identity import DefaultAzureCredential - -import os - - -async def main(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
-    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-    project_client = AIProjectClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    async with project_client:
-        agent = await project_client.agents.create_agent(
-            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await project_client.agents.create_thread()
-        print(f"Created thread, thread ID {thread.id}")
-
-        message = await project_client.agents.create_message(
-            thread_id=thread.id, role="user", content="Hello, tell me a joke"
-        )
-        print(f"Created message, message ID {message.id}")
-
-        async with await project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
-            async for event_type, event_data in stream:
-
-                if isinstance(event_data, MessageDeltaChunk):
-                    for content_part in event_data.delta.content:
-                        if isinstance(content_part, MessageDeltaTextContent):
-                            text_value = content_part.text.value if content_part.text else "No text"
-                            print(f"Text delta received: {text_value}")
-
-                elif isinstance(event_data, ThreadMessage):
-                    print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
-
-                elif isinstance(event_data, ThreadRun):
-                    print(f"ThreadRun status: {event_data.status}")
-
-                elif isinstance(event_data, RunStep):
-                    print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
-
-                elif event_type == AgentStreamEvent.ERROR:
-                    print(f"An error occurred. Data: {event_data}")
-
-                elif event_type == AgentStreamEvent.DONE:
-                    print("Stream completed.")
-                    break
-
-                else:
-                    print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-        await project_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await project_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
deleted file mode 100644
index d6c2fbf5dfb3..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_vector_store_batch_file_search_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_vector_store_batch_file_search_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-""" - -import asyncio -import os -from azure.ai.project.aio import AIProjectClient -from azure.ai.project.models import FileSearchTool, FilePurpose -from azure.identity import DefaultAzureCredential - - -async def main(): - # Create an Azure AI Client from a connection string, copied from your AI Studio project. - # At the moment, it should be in the format ";;;" - # Customer needs to login to Azure subscription via Azure CLI and set the environment variables - - project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] - ) - - async with project_client: - - # upload a file and wait for it to be processed - file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # create a vector store with no file and wait for it to be processed - vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, file_ids=[file.id] - ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - - # create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file - agent = await project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = await project_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - message = await project_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID: {message.id}") - - run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") - - await project_client.agents.delete_file(file.id) - print("Deleted file") - - await project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") - - await project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = await project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py deleted file mode 100644 index 006d93ae45ee..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
deleted file mode 100644
index 006d93ae45ee..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_with_file_search_attachment_async.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations to create messages with file search attachments from
-    the Azure Agents service using an asynchronous client.
-
-USAGE:
-    python sample_agents_with_file_search_attachment_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import asyncio
-
-from azure.ai.project.aio import AIProjectClient
-from azure.ai.project.models import FilePurpose
-from azure.ai.project.models import FileSearchTool, MessageAttachment, ToolResources
-from azure.identity import DefaultAzureCredential
-
-import os
-
-
-async def main():
-    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
-    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-    # The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-    project_client = AIProjectClient.from_connection_string(
-        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-    )
-
-    # Upload a file and wait for it to be processed
-    async with project_client:
-        file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS)
-
-        # Create agent; the file search capability is supplied per-message via the attachment below
-        agent = await project_client.agents.create_agent(
-            model="gpt-4-1106-preview",
-            name="my-assistant",
-            instructions="You are a helpful assistant",
-        )
-        print(f"Created agent, agent ID: {agent.id}")
-
-        thread = await project_client.agents.create_thread()
-        print(f"Created thread, thread ID: {thread.id}")
-
-        # Create a message with the file search attachment
-        # Notice that a vector store is created temporarily when using attachments; it has a default expiration policy of seven days.
-        attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
-        message = await project_client.agents.create_message(
-            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
-        )
-        print(f"Created message, message ID: {message.id}")
-
-        run = await project_client.agents.create_and_process_run(
-            thread_id=thread.id, assistant_id=agent.id, sleep_interval=4
-        )
-        print(f"Created run, run ID: {run.id}")
-
-        print(f"Run completed with status: {run.status}")
-
-        await project_client.agents.delete_file(file.id)
-        print("Deleted file")
-
-        await project_client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-
-        messages = await project_client.agents.list_messages(thread_id=thread.id)
-        print(f"Messages: {messages}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py
deleted file mode 100644
index 4931352e03c6..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/async_samples/user_async_functions.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-import asyncio
-import os
-import sys
-
-
-# Add parent directory to sys.path to import user_functions
-current_dir = os.path.dirname(os.path.abspath(__file__))
-parent_dir = os.path.abspath(os.path.join(current_dir, ".."))
-if parent_dir not in sys.path:
-    sys.path.insert(0, parent_dir)
-from user_functions import fetch_current_datetime, fetch_weather, send_email
-
-
-async def send_email_async(recipient: str, subject: str, body: str) -> str:
-    await asyncio.sleep(1)
-    return send_email(recipient, subject, body)
-
-
-# Statically defined user functions for fast reference, with send_email as async and the rest as sync
-user_async_functions = {
-    "fetch_current_datetime": fetch_current_datetime,
-    "fetch_weather": fetch_weather,
-    "send_email": send_email_async,
-}
diff --git a/sdk/ai/azure-ai-project/samples/agents/product_info_1.md b/sdk/ai/azure-ai-project/samples/agents/product_info_1.md
deleted file mode 100644
index 041155831d53..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/product_info_1.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Information about product item_number: 1
-
-## Brand
-Contoso Galaxy Innovations
-
-## Category
-Smart Eyewear
-
-## Features
-- Augmented Reality interface
-- Voice-controlled AI assistant
-- HD video recording with 3D audio
-- UV protection and blue light filtering
-- Wireless charging with extended battery life
-
-## User Guide
-
-### 1. Introduction
-Introduction to your new SmartView Glasses
-
-### 2. Product Overview
-Overview of features and controls
-
-### 3. Sizing and Fit
-Finding your perfect fit and style adjustments
-
-### 4. Proper Care and Maintenance
-Cleaning and caring for your SmartView Glasses
-
-### 5. Break-in Period
-Adjusting to the augmented reality experience
-
-### 6. Safety Tips
-Safety guidelines for public and private spaces
-
-### 7. Troubleshooting
-Quick fixes for common issues
-
-## Warranty Information
-Two-year limited warranty on all electronic components
-
-## Contact Information
-Customer Support at support@contoso-galaxy-innovations.com
-
-## Return Policy
-30-day return policy with no questions asked
-
-## FAQ
-- How to sync your SmartView Glasses with your devices
-- Troubleshooting connection issues
-- Customizing your augmented reality environment
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py
deleted file mode 100644
index c8c51ea43947..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_basics.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_basics.py
-
-DESCRIPTION:
-    This sample demonstrates how to use basic agent operations from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_basics.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os, time
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-with project_client:
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
-
-    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-    print(f"Created message, message ID: {message.id}")
-
-    run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
-
-    # Poll the run while its status is queued, in progress, or requires action (a reusable helper is sketched after this sample)
-    while run.status in ["queued", "in_progress", "requires_action"]:
-        # wait for a second
-        time.sleep(1)
-        run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
-
-        print(f"Run status: {run.status}")
-
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"messages: {messages}")
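The polling loop above can be factored into a small helper. This is an illustrative sketch only; the helper name and the timeout parameter are assumptions, not part of the SDK:

    # Hypothetical helper: poll a run to a terminal state, with a safety timeout
    def wait_for_run(project_client, thread_id, run, timeout_seconds=120):
        deadline = time.time() + timeout_seconds
        while run.status in ["queued", "in_progress", "requires_action"] and time.time() < deadline:
            time.sleep(1)
            run = project_client.agents.get_run(thread_id=thread_id, run_id=run.id)
        return run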
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py
deleted file mode 100644
index e4dbc4dd82ca..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_code_interpreter_attachment.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_code_interpreter_attachment.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with code interpreter from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_code_interpreter_attachment.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.ai.project.models import CodeInterpreterTool
-from azure.ai.project.models import FilePurpose
-from azure.ai.project.models import MessageAttachment
-from azure.identity import DefaultAzureCredential
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-with project_client:
-    # Upload a file and wait for it to be processed
-    file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
-    print(f"Uploaded file, file ID: {file.id}")
-
-    code_interpreter = CodeInterpreterTool()
-
-    # Note that CodeInterpreterTool must be enabled at agent creation, otherwise the agent will not be able to see the file attachment
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview",
-        name="my-assistant",
-        instructions="You are a helpful assistant",
-        tools=code_interpreter.definitions,
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, thread ID: {thread.id}")
-
-    # Create a message with the attachment
-    attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
-    message = project_client.agents.create_message(
-        thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
-    )
-    print(f"Created message, message ID: {message.id}")
-
-    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Run finished with status: {run.status}")
-
-    if run.status == "failed":
-        # If the error is "Rate limit is exceeded.", you need to request more quota
-        print(f"Run failed: {run.last_error}")
-
-    project_client.agents.delete_file(file.id)
-    print("Deleted file")
-
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py
deleted file mode 100644
index 3d9c25fc3e96..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_file_search.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_file_search.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with file searching from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_file_search.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.ai.project.models import FileSearchTool
-from azure.identity import DefaultAzureCredential
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-with project_client:
-
-    openai_file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
-    print(f"Uploaded file, file ID: {openai_file.id}")
-
-    openai_vectorstore = project_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore")
-    print(f"Created vector store, vector store ID: {openai_vectorstore.id}")
-
-    # Create file search tool with resources
-    file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id])
-
-    # Create agent with file search tool and process assistant run
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview",
-        name="my-assistant",
-        instructions="Hello, you are a helpful assistant and can search information from uploaded files",
-        tools=file_search.definitions,
-        tool_resources=file_search.resources,
-    )
-    print(f"Created agent, agent ID: {agent.id}")
-
-    # Create thread for communication
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, ID: {thread.id}")
-
-    # Create message to thread
-    message = project_client.agents.create_message(
-        thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?"
-    )
-    print(f"Created message, ID: {message.id}")
-
-    # Create and process assistant run in thread with tools
-    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Run finished with status: {run.status}")
-
-    if run.status == "failed":
-        # If the error is "Rate limit is exceeded.", you need to request more quota
-        print(f"Run failed: {run.last_error}")
-
-    # Delete the vector store when done
-    project_client.agents.delete_vector_store(openai_vectorstore.id)
-    print("Deleted vector store")
-
-    # Delete the agent when done
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    # Fetch and log all messages
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py
deleted file mode 100644
index b73898dae603..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_functions.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_functions.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with custom functions from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_functions.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-""" -import os, time -from azure.ai.project import AIProjectClient -from azure.identity import DefaultAzureCredential -from azure.ai.project.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - -# Initialize function tool with user functions -functions = FunctionTool(functions=user_functions) - -with project_client: - # Create an agent and run user's request with function calls - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are a helpful assistant", - tools=functions.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, ID: {thread.id}") - - message = project_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York?", - ) - print(f"Created message, ID: {message.id}") - - run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, ID: {run.id}") - - while run.status in ["queued", "in_progress", "requires_action"]: - time.sleep(1) - run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) - - if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print("No tool calls provided - cancelling run") - project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) - break - - tool_outputs = [] - for tool_call in tool_calls: - if isinstance(tool_call, RequiredFunctionToolCall): - try: - output = functions.execute(tool_call) - tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } - ) - except Exception as e: - print(f"Error executing tool_call {tool_call.id}: {e}") - - print(f"Tool outputs: {tool_outputs}") - if tool_outputs: - project_client.agents.submit_tool_outputs_to_run( - thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs - ) - - print(f"Current run status: {run.status}") - - print(f"Run completed with status: {run.status}") - - # Delete the agent when done - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - # Fetch and log all messages - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py deleted file mode 100644 index f8ac75278942..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_run_with_toolset.py +++ /dev/null @@ -1,80 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_run_with_toolset.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with toolset from - the Azure Agents service using a synchronous client. 
-
-USAGE:
-    python sample_agents_run_with_toolset.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-from azure.ai.project.models import FunctionTool, ToolSet, CodeInterpreterTool
-from user_functions import user_functions
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-# Initialize agent toolset with user functions and code interpreter
-functions = FunctionTool(user_functions)
-code_interpreter = CodeInterpreterTool()
-
-toolset = ToolSet()
-toolset.add(functions)
-toolset.add(code_interpreter)
-
-# Create agent with toolset and process assistant run
-with project_client:
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
-    )
-    print(f"Created agent, ID: {agent.id}")
-
-    # Create thread for communication
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, ID: {thread.id}")
-
-    # Create message to thread
-    message = project_client.agents.create_message(
-        thread_id=thread.id,
-        role="user",
-        content="Hello, send an email with the datetime and weather information in New York?",
-    )
-    print(f"Created message, ID: {message.id}")
-
-    # Create and process agent run in thread with tools
-    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
-    print(f"Run finished with status: {run.status}")
-
-    if run.status == "failed":
-        print(f"Run failed: {run.last_error}")
-
-    # Delete the agent when done
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    # Fetch and log all messages
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py
deleted file mode 100644
index 86a0cab17b29..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_eventhandler.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with an event handler in streaming from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_stream_eventhandler.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-""" - -import os -from azure.ai.project import AIProjectClient -from azure.identity import DefaultAzureCredential - -from azure.ai.project.models import ( - AgentEventHandler, - MessageDeltaTextContent, - MessageDeltaChunk, - ThreadMessage, - ThreadRun, - RunStep, -) - -from typing import Any - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) - - -class MyEventHandler(AgentEventHandler): - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") - - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") - - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with project_client: - # Create an agent and run stream with event handler - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" - ) - print(f"Created agent, agent ID {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - print(f"Created message, message ID {message.id}") - - with project_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() - - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py deleted file mode 100644 index ea4a6b680196..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ /dev/null @@ -1,132 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_eventhandler_with_functions.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. 
-
-USAGE:
-    python sample_agents_stream_eventhandler_with_functions.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun
-from azure.ai.project.models import AgentEventHandler
-from azure.identity import DefaultAzureCredential
-from azure.ai.project.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction
-
-from typing import Any
-
-from user_functions import user_functions
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
-)
-
-
-class MyEventHandler(AgentEventHandler):
-
-    def __init__(self, functions: FunctionTool) -> None:
-        self.functions = functions
-
-    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
-        for content_part in delta.delta.content:
-            if isinstance(content_part, MessageDeltaTextContent):
-                text_value = content_part.text.value if content_part.text else "No text"
-                print(f"Text delta received: {text_value}")
-
-    def on_thread_message(self, message: "ThreadMessage") -> None:
-        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
-
-    def on_thread_run(self, run: "ThreadRun") -> None:
-        print(f"ThreadRun status: {run.status}")
-
-        if run.status == "failed":
-            print(f"Run failed. Error: {run.last_error}")
-
-        if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
-            tool_calls = run.required_action.submit_tool_outputs.tool_calls
-
-            tool_outputs = []
-            for tool_call in tool_calls:
-                if isinstance(tool_call, RequiredFunctionToolCall):
-                    try:
-                        output = self.functions.execute(tool_call)
-                        tool_outputs.append(
-                            {
-                                "tool_call_id": tool_call.id,
-                                "output": output,
-                            }
-                        )
-                    except Exception as e:
-                        print(f"Error executing tool_call {tool_call.id}: {e}")
-
-            print(f"Tool outputs: {tool_outputs}")
-            if tool_outputs:
-                with project_client.agents.submit_tool_outputs_to_stream(
-                    thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=self
-                ) as stream:
-                    stream.until_done()
-
-    def on_run_step(self, step: "RunStep") -> None:
-        print(f"RunStep type: {step.type}, Status: {step.status}")
-
-    def on_error(self, data: str) -> None:
Data: {data}") - - def on_done(self) -> None: - print("Stream completed.") - - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - -with project_client: - functions = FunctionTool(user_functions) - - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are a helpful assistant", - tools=functions.definitions, - ) - print(f"Created agent, ID: {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = project_client.agents.create_message( - thread_id=thread.id, - role="user", - content="Hello, send an email with the datetime and weather information in New York? Also let me know the details.", - ) - print(f"Created message, message ID {message.id}") - - with project_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler(functions) - ) as stream: - stream.until_done() - - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py deleted file mode 100644 index b63d137a0671..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ /dev/null @@ -1,109 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_stream_eventhandler_with_toolset.py - -DESCRIPTION: - This sample demonstrates how to use agent operations with an event handler and toolset from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_stream_eventhandler_with_toolset.py - - Before running the sample: - - pip install azure.ai.project azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.project import AIProjectClient -from azure.ai.project.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.project.models import AgentEventHandler -from azure.ai.project.operations import AgentsOperations -from azure.identity import DefaultAzureCredential -from azure.ai.project.models import FunctionTool, ToolSet - - -import os -from typing import Any - -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - - -# When using FunctionTool with ToolSet in agent creation, the tool call events are handled inside the create_stream -# method and functions gets automatically called by default. 
-class MyEventHandler(AgentEventHandler):
-
-    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
-        for content_part in delta.delta.content:
-            if isinstance(content_part, MessageDeltaTextContent):
-                text_value = content_part.text.value if content_part.text else "No text"
-                print(f"Text delta received: {text_value}")
-
-    def on_thread_message(self, message: "ThreadMessage") -> None:
-        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
-
-    def on_thread_run(self, run: "ThreadRun") -> None:
-        print(f"ThreadRun status: {run.status}")
-
-        if run.status == "failed":
-            print(f"Run failed. Error: {run.last_error}")
-
-    def on_run_step(self, step: "RunStep") -> None:
-        print(f"RunStep type: {step.type}, Status: {step.status}")
-
-    def on_error(self, data: str) -> None:
-        print(f"An error occurred. Data: {data}")
-
-    def on_done(self) -> None:
-        print("Stream completed.")
-
-    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
-        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-
-with project_client:
-    functions = FunctionTool(user_functions)
-    toolset = ToolSet()
-    toolset.add(functions)
-
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
-    )
-    print(f"Created agent, ID: {agent.id}")
-
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, thread ID {thread.id}")
-
-    message = project_client.agents.create_message(
-        thread_id=thread.id,
-        role="user",
-        content="Hello, send an email with the datetime and weather information in New York? Also let me know the details",
-    )
-    print(f"Created message, message ID {message.id}")
-
-    with project_client.agents.create_stream(
-        thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
-    ) as stream:
-        stream.until_done()
-
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py
deleted file mode 100644
index 7d89bd2ab8bc..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_iteration.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations in streaming from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_stream_iteration.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-from azure.ai.project.models import (
-    AgentStreamEvent,
-    MessageDeltaTextContent,
-    MessageDeltaChunk,
-    ThreadMessage,
-    ThreadRun,
-    RunStep,
-)
-
-
-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
-# The customer needs to log in to the Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-with project_client:
-    # Create an agent and run stream with iteration
-    agent = project_client.agents.create_agent(
-        model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
-    )
-    print(f"Created agent, ID {agent.id}")
-
-    thread = project_client.agents.create_thread()
-    print(f"Created thread, thread ID {thread.id}")
-
-    message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
-    print(f"Created message, message ID {message.id}")
-
-    with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream:
-
-        for event_type, event_data in stream:
-
-            if isinstance(event_data, MessageDeltaChunk):
-                for content_part in event_data.delta.content:
-                    if isinstance(content_part, MessageDeltaTextContent):
-                        text_value = content_part.text.value if content_part.text else "No text"
-                        print(f"Text delta received: {text_value}")
-
-            elif isinstance(event_data, ThreadMessage):
-                print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}")
-
-            elif isinstance(event_data, ThreadRun):
-                print(f"ThreadRun status: {event_data.status}")
-
-            elif isinstance(event_data, RunStep):
-                print(f"RunStep type: {event_data.type}, Status: {event_data.status}")
-
-            elif event_type == AgentStreamEvent.ERROR:
-                print(f"An error occurred. Data: {event_data}")
-
-            elif event_type == AgentStreamEvent.DONE:
-                print("Stream completed.")
-                break
-
-            else:
-                print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
-
-    project_client.agents.delete_agent(agent.id)
-    print("Deleted agent")
-
-    messages = project_client.agents.list_messages(thread_id=thread.id)
-    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py
deleted file mode 100644
index 9ae1d421c9ea..000000000000
--- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_stream_iteration_with_toolset.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_agents_stream_iteration_with_toolset.py
-
-DESCRIPTION:
-    This sample demonstrates how to use agent operations with toolset and iteration in streaming from
-    the Azure Agents service using a synchronous client.
-
-USAGE:
-    python sample_agents_stream_iteration_with_toolset.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set these environment variables with your own values:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-""" - -import os -from azure.ai.project import AIProjectClient -from azure.ai.project.models import AgentStreamEvent -from azure.ai.project.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.project.models import FunctionTool, ToolSet -from azure.ai.project.operations import AgentsOperations -from azure.identity import DefaultAzureCredential -from user_functions import user_functions - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - - -# Function to handle tool stream iteration -def handle_submit_tool_outputs(operations: AgentsOperations, thread_id, run_id, tool_outputs): - try: - with operations.submit_tool_outputs_to_stream( - thread_id=thread_id, - run_id=run_id, - tool_outputs=tool_outputs, - ) as tool_stream: - for tool_event_type, tool_event_data in tool_stream: - if tool_event_type == AgentStreamEvent.ERROR: - print(f"An error occurred in tool stream. Data: {tool_event_data}") - elif tool_event_type == AgentStreamEvent.DONE: - print("Tool stream completed.") - break - else: - if isinstance(tool_event_data, MessageDeltaChunk): - handle_message_delta(tool_event_data) - - except Exception as e: - print(f"Failed to process tool stream: {e}") - - -# Function to handle message delta chunks -def handle_message_delta(delta: MessageDeltaChunk) -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") - - -functions = FunctionTool(user_functions) -toolset = ToolSet() -toolset.add(functions) - -with project_client: - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, thread ID {thread.id}") - - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what's the time?") - print(f"Created message, message ID {message.id}") - - with project_client.agents.create_stream(thread_id=thread.id, assistant_id=agent.id) as stream: - - for event_type, event_data in stream: - - if isinstance(event_data, MessageDeltaChunk): - handle_message_delta(event_data) - - elif isinstance(event_data, ThreadMessage): - print(f"ThreadMessage created. ID: {event_data.id}, Status: {event_data.status}") - - elif isinstance(event_data, ThreadRun): - print(f"ThreadRun status: {event_data.status}") - - if event_data.status == "failed": - print(f"Run failed. Error: {event_data.last_error}") - - elif isinstance(event_data, RunStep): - print(f"RunStep type: {event_data.type}, Status: {event_data.status}") - - elif event_type == AgentStreamEvent.ERROR: - print(f"An error occurred. 
Data: {event_data}") - - elif event_type == AgentStreamEvent.DONE: - print("Stream completed.") - break - - else: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") - - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py deleted file mode 100644 index b5ddaf482310..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_vector_store_batch_file_search.py +++ /dev/null @@ -1,88 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_vector_store_batch_file_search_async.py - -DESCRIPTION: - This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_vector_store_batch_file_search_async.py - - Before running the sample: - - pip install azure.ai.project azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.project import AIProjectClient -from azure.ai.project.models import FileSearchTool, FilePurpose -from azure.identity import DefaultAzureCredential - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - -with project_client: - - # upload a file and wait for it to be processed - file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # create a vector store with no file and wait for it to be processed - vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") - print(f"Created vector store, vector store ID: {vector_store.id}") - - # add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = project_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, file_ids=[file.id] - ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") - - # create a file search tool - file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - - # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - message = project_client.agents.create_message( - thread_id=thread.id, role="user", 
content="What feature does Smart Eyewear offer?" - ) - print(f"Created message, message ID: {message.id}") - - run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") - - project_client.agents.delete_file(file.id) - print("Deleted file") - - project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") - - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py deleted file mode 100644 index 416060fbf206..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/sample_agents_with_file_search_attachment.py +++ /dev/null @@ -1,75 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_agents_with_file_search_attachment.py - -DESCRIPTION: - This sample demonstrates how to use agent operations to create messages with file search attachments from - the Azure Agents service using a synchronous client. - -USAGE: - python sample_agents_with_file_search_attachment.py - - Before running the sample: - - pip install azure.ai.project azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import os -from azure.ai.project import AIProjectClient -from azure.ai.project.models import FilePurpose -from azure.ai.project.models import MessageAttachment -from azure.ai.project.models import FileSearchTool -from azure.identity import DefaultAzureCredential - - -# Create an Azure AI Client from a connection string, copied from your AI Studio project. -# At the moment, it should be in the format ";;;" -# Customer needs to login to Azure subscription via Azure CLI and set the environment variables - -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] -) - -with project_client: - - # upload a file and wait for it to be processed - file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) - print(f"Uploaded file, file ID: {file.id}") - - # Create agent with file search tool - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - ) - print(f"Created agent, agent ID: {agent.id}") - - thread = project_client.agents.create_thread() - print(f"Created thread, thread ID: {thread.id}") - - # Create a message with the file search attachment - # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. 
- attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) - message = project_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] - ) - print(f"Created message, message ID: {message.id}") - - run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") - - project_client.agents.delete_file(file.id) - print("Deleted file") - - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) - print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-project/samples/agents/user_functions.py b/sdk/ai/azure-ai-project/samples/agents/user_functions.py deleted file mode 100644 index 8072b1b8a944..000000000000 --- a/sdk/ai/azure-ai-project/samples/agents/user_functions.py +++ /dev/null @@ -1,65 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -import json -import datetime - -# These are the user-defined functions that can be called by the agent. - - -def fetch_current_datetime() -> str: - """ - Get the current time as a JSON string. - - :return: The current time in JSON format. - :rtype: str - """ - current_time = datetime.datetime.now() - time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")}) - return time_json - - -def fetch_weather(location: str) -> str: - """ - Fetches the weather information for the specified location. - - :param location (str): The location to fetch weather for. - :return: Weather information as a JSON string. - :rtype: str - """ - # In a real-world scenario, you'd integrate with a weather API. - # Here, we'll mock the response. - mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} - weather = mock_weather_data.get(location, "Weather data not available for this location.") - weather_json = json.dumps({"weather": weather}) - return weather_json - - -def send_email(recipient: str, subject: str, body: str) -> str: - """ - Sends an email with the specified subject and body to the recipient. - - :param recipient (str): Email address of the recipient. - :param subject (str): Subject of the email. - :param body (str): Body content of the email. - :return: Confirmation message. - :rtype: str - """ - # In a real-world scenario, you'd use an SMTP server or an email service API. - # Here, we'll mock the email sending. - print(f"Sending email to {recipient}...") - print(f"Subject: {subject}") - print(f"Body:\n{body}") - - message_json = json.dumps({"message": f"Email successfully sent to {recipient}."}) - return message_json - - -# Statically defined user functions for fast reference -user_functions = { - "fetch_current_datetime": fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email, -} diff --git a/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py deleted file mode 100644 index 33dcfdaba1dd..000000000000 --- a/sdk/ai/azure-ai-project/samples/connections/async_samples/sample_connections_async.py +++ /dev/null @@ -1,139 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------
-
-"""
-FILE: sample_connections_async.py
-
-DESCRIPTION:
-    Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections
-    and get connection properties.
-
-USAGE:
-    python sample_connections_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.project aiohttp azure-identity
-
-    Set the environment variables with your own values:
-    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import asyncio
-import os
-from azure.ai.project.aio import AIProjectClient
-from azure.ai.project.models import ConnectionType, AuthenticationType
-from azure.identity import DefaultAzureCredential
-
-
-async def sample_connections_async():
-
-    project_client = AIProjectClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-    )
-
-    async with project_client:
-
-        # List the properties of all connections
-        connections = await project_client.connections.list()
-        print(f"====> Listing of all connections (found {len(connections)}):")
-        for connection in connections:
-            print(connection)
-
-        # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections)
-        connections = await project_client.connections.list(
-            connection_type=ConnectionType.AZURE_OPEN_AI,
-        )
-        print(f"====> Listing of all Azure Open AI connections (found {len(connections)}):")
-        for connection in connections:
-            print(connection)
-
-        # Get the properties of the default connection of a particular "type", with credentials
-        connection = await project_client.connections.get_default(
-            connection_type=ConnectionType.AZURE_OPEN_AI,
-            with_credentials=True,  # Optional. Defaults to "False"
-        )
-        print("====> Get default Azure Open AI connection:")
-        print(connection)
-
-        # Get the properties of a connection by connection name:
-        connection = await project_client.connections.get(
-            connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"],
-            with_credentials=True,  # Optional.
Defaults to "False" - ) - print("====> Get connection by name:") - print(connection) - - # Examples of how you would create Inference client - if connection.connection_type == ConnectionType.AZURE_OPEN_AI: - - from openai import AsyncAzureOpenAI - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AsyncAzureOpenAI( - api_key=connection.key, - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif connection.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - from azure.identity import get_bearer_token_provider - - client = AsyncAzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = await client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - print(response.choices[0].message.content) - - elif connection.connection_type == ConnectionType.SERVERLESS: - - from azure.ai.inference.aio import ChatCompletionsClient - from azure.ai.inference.models import UserMessage - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) - ) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - await client.close() - print(response.choices[0].message.content) - - -async def main(): - await sample_connections_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/connections/sample_connections.py b/sdk/ai/azure-ai-project/samples/connections/sample_connections.py deleted file mode 100644 index 5be7c4195349..000000000000 --- a/sdk/ai/azure-ai-project/samples/connections/sample_connections.py +++ /dev/null @@ -1,120 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_connections.py - -DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to enumerate connections - and get connection properties. 
-
-USAGE:
-    python sample_connections.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set the environment variables with your own values:
-    1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os
-from azure.ai.project import AIProjectClient
-from azure.ai.project.models import ConnectionType, AuthenticationType
-from openai import AzureOpenAI
-from azure.ai.inference import ChatCompletionsClient
-from azure.ai.inference.models import UserMessage
-from azure.identity import DefaultAzureCredential, get_bearer_token_provider
-from azure.core.credentials import AzureKeyCredential
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-with project_client:
-    # List the properties of all connections
-    connections = project_client.connections.list()
-    print(f"====> Listing of all connections (found {len(connections)}):")
-    for connection in connections:
-        print(connection)
-
-    # List the properties of all connections of a particular "type" (In this sample, Azure OpenAI connections)
-    connections = project_client.connections.list(
-        connection_type=ConnectionType.AZURE_OPEN_AI,
-    )
-    print(f"====> Listing of all Azure Open AI connections (found {len(connections)}):")
-    for connection in connections:
-        print(connection)
-
-    # Get the properties of the default connection of a particular "type", with credentials
-    connection = project_client.connections.get_default(
-        connection_type=ConnectionType.AZURE_OPEN_AI,
-        with_credentials=True,  # Optional. Defaults to "False"
-    )
-    print("====> Get default Azure Open AI connection:")
-    print(connection)
-
-    # Get the properties of a connection by connection name:
-    connection = project_client.connections.get(
-        connection_name=os.environ["AI_CLIENT_CONNECTION_NAME"], with_credentials=True  # Optional.
Defaults to "False" - ) - print("====> Get connection by name:") - print(connection) - - -# Examples of how you would create Inference client -if connection.connection_type == ConnectionType.AZURE_OPEN_AI: - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AzureOpenAI( - api_key=connection.key, - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif connection.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = client.chat.completions.create( - model="gpt-4-0613", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - client.close() - print(response.choices[0].message.content) - -elif connection.connection_type == ConnectionType.SERVERLESS: - - if connection.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential - ) - else: - raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - client.close() - print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl deleted file mode 100644 index 0396a22c2db6..000000000000 --- a/sdk/ai/azure-ai-project/samples/evaluations/evaluate_test_data.jsonl +++ /dev/null @@ -1,3 +0,0 @@ -{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. 
The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."}
-{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."}
-{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."}
diff --git a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py
deleted file mode 100644
index 096224c70888..000000000000
--- a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_evaluations.py
-
-DESCRIPTION:
-    This sample demonstrates how to create and retrieve an evaluation of a dataset
-    using a synchronous client.
-
-USAGE:
-    python sample_evaluations.py
-
-    Before running the sample:
-
-    pip install azure-identity
-    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/ai_project_utils#egg=azure-ai-client&subdirectory=sdk/ai/azure-ai-client"
-    pip install "git+https://github.com/Azure/azure-sdk-for-python.git@users/singankit/demo_evaluators_id#egg=azure-ai-evaluation&subdirectory=sdk/evaluation/azure-ai-evaluation"
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-
-import os, time
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-from azure.ai.project.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType
-from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, HateUnfairnessEvaluator

-# Create an Azure AI Client from a connection string, copied from your AI Studio project.
-# At the moment, it should be in the format "<Endpoint>;<SubscriptionId>;<ResourceGroupName>;<WorkspaceName>"
-# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
-
-project_client = AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-)
-
-# Upload data for evaluation
-# Service side fix needed to make this work
-# data_id = project_client.upload_file("./evaluate_test_data.jsonl")
-data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3"
-
-default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
-
-
-
-# Define the evaluation
-evaluation = Evaluation(
-    display_name="Remote Evaluation",
-    description="Evaluation of dataset",
-    data=Dataset(id=data_id),
-    evaluators={
-        "f1_score": EvaluatorConfiguration(
-            id=F1ScoreEvaluator.evaluator_id,
-        ),
-        "relevance": EvaluatorConfiguration(
-            id=RelevanceEvaluator.evaluator_id,
-            init_params={
-                "model_config": default_connection.to_evaluator_model_config(deployment_name="GPT-4-Prod", api_version="2024-08-01-preview")
-            },
-        ),
-        "hate_unfairness": EvaluatorConfiguration(
-            id=HateUnfairnessEvaluator.evaluator_id,
-            init_params={
-                "azure_ai_project": project_client.scope
-            },
-        ),
-    },
-    # This is needed as a workaround until environment gets published to registry
-    properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"},
-)
-
-# Create the evaluation
-evaluation_response = project_client.evaluations.create(
-    evaluation=evaluation,
-)
-
-# Get the evaluation
-get_evaluation_response = project_client.evaluations.get(evaluation_response.id)
-
-print("----------------------------------------------------------------")
-print("Created evaluation, evaluation ID: ", get_evaluation_response.id)
-print("Evaluation status: ", get_evaluation_response.status)
-print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"])
-print("----------------------------------------------------------------")
diff --git a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py
deleted file mode 100644
index 48fb825aa98c..000000000000
--- a/sdk/ai/azure-ai-project/samples/evaluations/sample_evaluations_schedules.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from azure.ai.project import AIProjectClient
-
-from azure.identity import
DefaultAzureCredential
-from azure.ai.project.models import ApplicationInsightsConfiguration, EvaluatorConfiguration, SamplingStrategy, EvaluationSchedule, CronTrigger, RecurrenceTrigger, Frequency, RecurrenceSchedule
-
-def main():
-    # Project Configuration Canary
-    Subscription = "72c03bf3-4e69-41af-9532-dfcdc3eefef4"
-    ResourceGroup = "apeddau-rg-westus2"
-    Workspace = "apeddau-canay-ws-eastus2euap"
-    Endpoint = "eastus2euap.api.azureml.ms"
-
-    # Create an Azure AI client
-    ai_client = AIProjectClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=f"{Endpoint};{Subscription};{ResourceGroup};{Workspace}",
-        logging_enable=True,  # Optional. Remove this line if you don't want to show how to enable logging
-    )
-
-    # Sample for creating an evaluation schedule with a recurrence trigger of daily frequency
-
-    app_insights_config = ApplicationInsightsConfiguration(
-        resource_id="/subscriptions/72c03bf3-4e69-41af-9532-dfcdc3eefef4/resourceGroups/apeddau-rg-centraluseuap/providers/Microsoft.insights/components/apeddauwscentr0026977484",
-        query="traces | where message contains \"\"",
-        service_name="sample_service_name"
-    )
-
-    f1_evaluator_config = EvaluatorConfiguration(
-        id="azureml://registries/model-evaluation-dev-01/models/F1ScoreEval/versions/1",
-        init_params={
-            "column_mapping": {
-                "response": "${data.message}",
-                "ground_truth": "${data.itemType}"
-            }
-        }
-    )
-
-    recurrence_trigger = RecurrenceTrigger(frequency="daily", interval=1)
-    evaluators = {
-        "f1_score": f1_evaluator_config,
-    }
-
-    sampling_strategy = SamplingStrategy(rate=0.2)
-    name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104"
-    description = "Testing Online eval command job in CANARY environment"
-    tags = {"tag1": "value1", "tag2": "value2"}
-    properties = {"Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1"}
-
-    evaluation_schedule = EvaluationSchedule(
-        data=app_insights_config,
-        evaluators=evaluators,
-        trigger=recurrence_trigger,
-        sampling_strategy=sampling_strategy,
-        description=description,
-        tags=tags,
-        properties=properties
-    )
-
-    evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule)
-    print(evaluation_schedule.provisioning_status)
-    print(evaluation_schedule)
-
-    # Sample for getting an evaluation schedule by name
-    evaluation_schedule = ai_client.evaluations.get_schedule(name)
-    print(evaluation_schedule)
-
-    # Sample for listing evaluation schedules
-    for evaluation_schedule in ai_client.evaluations.list_schedule():
-        print(evaluation_schedule)
-
-    # Sample for deleting an evaluation schedule by name
-    ai_client.evaluations.delete_schedule(name)
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py
deleted file mode 100644
index bf2f4324e6bd..000000000000
--- a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_azure_openai_client_async.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_azure_openai_client_async.py
-
-DESCRIPTION:
-    Given an AIProjectClient, this sample demonstrates how to get an authenticated
-    AsyncAzureOpenAI client from the openai package.
-
-USAGE:
-    python sample_get_azure_openai_client_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.project aiohttp openai
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-import asyncio
-from azure.ai.project.aio import AIProjectClient
-from azure.identity import DefaultAzureCredential
-
-
-async def sample_get_azure_openai_client_async():
-
-    async with AIProjectClient.from_connection_string(
-        credential=DefaultAzureCredential(),
-        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-    ) as project_client:
-
-        # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection:
-        async with await project_client.inference.get_azure_openai_client() as client:
-
-            response = await client.chat.completions.create(
-                model="gpt-4-0613",
-                messages=[
-                    {
-                        "role": "user",
-                        "content": "How many feet are in a mile?",
-                    },
-                ],
-            )
-
-            print(response.choices[0].message.content)
-
-
-async def main():
-    await sample_get_azure_openai_client_async()
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py
deleted file mode 100644
index 95a6735b7493..000000000000
--- a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_chat_completions_client_async.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_chat_completions_client_async.py
-
-DESCRIPTION:
-    Given an AIProjectClient, this sample demonstrates how to get an authenticated
-    async ChatCompletionsClient from the azure.ai.inference package.
-
-USAGE:
-    python sample_get_chat_completions_client_async.py
-
-    Before running the sample:
-
-    pip install azure.ai.project aiohttp azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-""" -import os -import asyncio -from azure.ai.project.aio import AIProjectClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential - - -async def sample_get_chat_completions_client_async(): - - async with AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as project_client: - - # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: - async with await project_client.inference.get_chat_completions_client() as client: - - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - print(response.choices[0].message.content) - - -async def main(): - await sample_get_chat_completions_client_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py deleted file mode 100644 index d18836f79ce3..000000000000 --- a/sdk/ai/azure-ai-project/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ /dev/null @@ -1,54 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_get_embeddings_client_async.py - -DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated - async EmbeddingsClient from the azure.ai.inference package. - -USAGE: - python sample_get_embeddings_client_async.py - - Before running the sample: - - pip install azure.ai.project aiohttp azure-identity - - Set this environment variable with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" -import asyncio -import os -from azure.ai.project.aio import AIProjectClient -from azure.identity import DefaultAzureCredential - - -async def sample_get_embeddings_client_async(): - - async with AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], - ) as project_client: - - # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: - async with await project_client.inference.get_embeddings_client() as client: - - response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) - - for item in response.data: - length = len(item.embedding) - print( - f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, " - f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]" - ) - - -async def main(): - await sample_get_embeddings_client_async() - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py deleted file mode 100644 index 5f68cc4865e9..000000000000 --- a/sdk/ai/azure-ai-project/samples/inference/sample_get_azure_openai_client.py +++ /dev/null @@ -1,45 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------
-
-"""
-FILE: sample_get_azure_openai_client.py
-
-DESCRIPTION:
-    Given an AIProjectClient, this sample demonstrates how to get an authenticated
-    AzureOpenAI client from the openai package.
-
-USAGE:
-    python sample_get_azure_openai_client.py
-
-    Before running the sample:
-
-    pip install azure.ai.project openai
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-
-with AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-) as project_client:
-
-    # Get an authenticated OpenAI client for your default Azure OpenAI connection:
-    with project_client.inference.get_azure_openai_client() as client:
-
-        response = client.chat.completions.create(
-            model="gpt-4-0613",
-            messages=[
-                {
-                    "role": "user",
-                    "content": "How many feet are in a mile?",
-                },
-            ],
-        )
-
-        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py
deleted file mode 100644
index 96e7d97618f2..000000000000
--- a/sdk/ai/azure-ai-project/samples/inference/sample_get_embeddings_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-
-"""
-FILE: sample_get_embeddings_client.py
-
-DESCRIPTION:
-    Given an AIProjectClient, this sample demonstrates how to get an authenticated
-    EmbeddingsClient from the azure.ai.inference package.
-
-USAGE:
-    python sample_get_embeddings_client.py
-
-    Before running the sample:
-
-    pip install azure.ai.project azure-identity
-
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
-"""
-import os
-from azure.ai.project import AIProjectClient
-from azure.identity import DefaultAzureCredential
-
-with AIProjectClient.from_connection_string(
-    credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
-) as project_client:
-
-    # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection:
-    with project_client.inference.get_embeddings_client() as client:
-
-        response = client.embed(input=["first phrase", "second phrase", "third phrase"])
-
-        for item in response.data:
-            length = len(item.embedding)
-            print(
-                f"data[{item.index}]: length={length}, [{item.embedding[0]}, {item.embedding[1]}, "
-                f"..., {item.embedding[length-2]}, {item.embedding[length-1]}]"
-            )
diff --git a/sdk/ai/azure-ai-project/setup.py b/sdk/ai/azure-ai-project/setup.py
deleted file mode 100644
index 3fe61e5b4587..000000000000
--- a/sdk/ai/azure-ai-project/setup.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) Python Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# -------------------------------------------------------------------------- -# coding: utf-8 - -import os -import re -from setuptools import setup, find_packages - - -PACKAGE_NAME = "azure-ai-project" -PACKAGE_PPRINT_NAME = "Azure AI Project" - -# a-b-c => a/b/c -package_folder_path = PACKAGE_NAME.replace("-", "/") - -# Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, "_version.py"), "r") as fd: - version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) - -if not version: - raise RuntimeError("Cannot find version information") - - -setup( - name=PACKAGE_NAME, - version=version, - description="Microsoft {} Client Library for Python".format(PACKAGE_PPRINT_NAME), - long_description=open("README.md", "r").read(), - long_description_content_type="text/markdown", - license="MIT License", - author="Microsoft Corporation", - author_email="azpysdkhelp@microsoft.com", - url="https://github.com/Azure/azure-sdk-for-python/tree/main/sdk", - keywords="azure, azure sdk", - classifiers=[ - "Development Status :: 4 - Beta", - "Programming Language :: Python", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "License :: OSI Approved :: MIT License", - ], - zip_safe=False, - packages=find_packages( - exclude=[ - "tests", - # Exclude packages that will be covered by PEP420 or nspkg - "azure", - "azure.ai", - ] - ), - include_package_data=True, - package_data={ - "azure.ai.project": ["py.typed"], - }, - install_requires=[ - "isodate>=0.6.1", - "azure-core>=1.30.0", - "typing-extensions>=4.6.0", - ], - python_requires=">=3.8", -) diff --git a/sdk/ai/azure-ai-project/tests/README.md b/sdk/ai/azure-ai-project/tests/README.md deleted file mode 100644 index 248eb90b98fc..000000000000 --- a/sdk/ai/azure-ai-project/tests/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Azure AI Project client library tests for Python - -The instructions below are for running tests locally, on a Windows machine, against the live service. - -## Build and install the client library - -- Clone or download this sample repository. 
-- Open a command prompt window in the folder `sdk\ai\azure-ai-project`
-- If you want to run tests against the latest published client library, install it by running:
-   ```bash
-   pip install azure-ai-project
-   ```
-- If you want to run tests against a locally built client library:
-    - First build the wheel:
-        ```bash
-        pip install wheel
-        pip install -r dev_requirements.txt
-        python setup.py bdist_wheel
-        ```
-    - Then install the resulting local wheel (update version `1.0.0b1` to the current one):
-        ```bash
-        pip install dist\azure_ai_project-1.0.0b1-py3-none-any.whl --user --force-reinstall
-        ```
-
-## Setup for running tests in the `agents` folder
-
-```bash
-set PROJECT_CONNECTION_STRING_AGENTS_TESTS=<your Project connection string>
-```
-
-## Setup for running tests in the `evaluations` folder
-
-## Setup for running tests in the `connections` and `inference` folders
-
-You need an Azure AI Project that has the following:
-
-TODO
-
-Copy the `Project connection string` from the Azure AI Studio and set the following environment variable:
-
-```bash
-set PROJECT_CONNECTION_STRING_CONNECTIONS_TESTS=<your Project connection string>
-```
-
-## Configure test proxy
-
-Configure the test proxy to run live service tests without recordings:
-
-```bash
-set AZURE_TEST_RUN_LIVE=true
-set AZURE_SKIP_LIVE_RECORDING=true
-set PROXY_URL=http://localhost:5000
-set AZURE_TEST_USE_CLI_AUTH=true
-```
-
-## Run tests
-
-To run all tests, type:
-
-```bash
-pytest
-```
-
-To run tests in a particular folder (`tests\connections` for example):
-
-```bash
-pytest tests\connections
-```
-
-## Additional information
-
-See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings.
\ No newline at end of file
diff --git a/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py
deleted file mode 100644
index 2d057e90da5d..000000000000
--- a/sdk/ai/azure-ai-project/tests/agents/test_agents_client.py
+++ /dev/null
@@ -1,1119 +0,0 @@
-# pylint: disable=too-many-lines
-# ------------------------------------
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-# ------------------------------------
-import os
-import json
-import time
-import functools
-import datetime
-import logging
-import sys
-
-from azure.ai.project import AIProjectClient
-from azure.ai.project.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
-from azure.core.pipeline.transport import RequestsTransport
-from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
-from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
-from azure.ai.project.models import FunctionTool
-from azure.identity import DefaultAzureCredential
-
-# TODO clean this up / get rid of anything not in use
-
-"""
-issues I've noticed with the code:
-    delete_thread(thread.id) fails
-    cancel_thread(thread.id) expires/times out occasionally
-    added time.sleep() to the beginning of my last few tests to avoid limits
-    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
-"""
-
-# Set to True to enable SDK logging
-LOGGING_ENABLED = True
-
-if LOGGING_ENABLED:
-    # Create a logger for the 'azure' SDK
-    # See https://docs.python.org/3/library/logging.html
-    logger = logging.getLogger("azure")
-    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
-
-    # Configure a console output
-    handler = logging.StreamHandler(stream=sys.stdout)
-    logger.addHandler(handler)
-
-
-agentClientPreparer = functools.partial(
-    EnvironmentVariableLoader,
-    "azure_ai_project",
-    project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
-)
-"""
-agentClientPreparer = functools.partial(
-    EnvironmentVariableLoader,
-    'azure_ai_project',
-    azure_ai_project_host_name="https://foo.bar.some-domain.ms",
-    azure_ai_project_subscription_id="00000000-0000-0000-0000-000000000000",
-    azure_ai_project_resource_group_name="rg-resour-cegr-oupfoo1",
-    azure_ai_project_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
-)
-"""
-
-
-# create tool for agent use
-def fetch_current_datetime_live():
-    """
-    Get the current time as a JSON string.
-
-    :return: The current time in JSON format.
-    :rtype: str
-    """
-    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    time_json = json.dumps({"current_time": current_datetime})
-    return time_json
-
-
-# create tool for agent use
-def fetch_current_datetime_recordings():
-    """
-    Get the current time as a JSON string.
-
-    :return: Static time string so that test recordings work.
-    :rtype: str
-    """
-    time_json = json.dumps({"current_time": "2024-10-10 12:30:19"})
-    return time_json
-
-
-# Statically defined user functions for fast reference
-user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings}
-user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live}
-
-
-# The test class name needs to start with "Test" to get collected by pytest
-class TestagentClient(AzureRecordedTestCase):
-
-    # helper function: create a client using environment variables
-    def create_client(self, **kwargs):
-        # fetch environment variables
-        connection_string = kwargs.pop("project_connection_string_agents_tests")
-        credential = self.get_credential(AIProjectClient, is_async=False)
-
-        # create and return client
-        client = AIProjectClient.from_connection_string(
-            credential=credential,
-            conn_str=connection_string,
-        )
-
-        return client
-
-    # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list
-    """
-    # NOTE: this test should not be run against a shared resource, as it will delete all agents
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_clear_client(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AIProjectClient)
-
-        # clear agent list
-        agents = client.agents.list_agents().data
-        for agent in agents:
-            client.agents.delete_agent(agent.id)
-        assert client.agents.list_agents().data.__len__() == 0
-
-        # close client
-        client.close()
-    """
-
-    # # **********************************************************************************
-    # #
-    # # UNIT TESTS
-    # #
-    # # **********************************************************************************
-
-    # # **********************************************************************************
-    # #
-    # # HAPPY PATH SERVICE TESTS - agent APIs
-    # #
-    # # **********************************************************************************
-
-    # test client creation
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_client(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AIProjectClient)
-
-        # close client
-        client.close()
-
-    # test agent creation and deletion
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_delete_agent(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AIProjectClient)
-        print("Created client")
-
-        # create agent
-        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-
-        # delete agent and close client
-        client.agents.delete_agent(agent.id)
-        print("Deleted agent")
-        client.close()
-
-    # test agent creation with tools
-    @agentClientPreparer()
-    @recorded_by_proxy
-    def test_create_agent_with_tools(self, **kwargs):
-        # create client
-        client = self.create_client(**kwargs)
-        assert isinstance(client, AIProjectClient)
-
-        # initialize agent functions
-        functions = FunctionTool(functions=user_functions_recording)
-
-        # create agent with tools
-        agent = client.agents.create_agent(
-            model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions
-        )
-        assert agent.id
-        print("Created agent, agent ID", agent.id)
-        assert agent.tools
-        assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
-        print("Tool successfully submitted:",
functions.definitions[0]["function"]["name"]) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - @agentClientPreparer() - @recorded_by_proxy - def test_update_agent(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - - # update agent and confirm changes went through - agent.update(name="my-agent2", instructions="You are helpful agent") - assert agent.name - assert agent.name == "my-agent2" - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - """ - DISABLED: can't perform consistently on shared resource - @agentClientPreparer() - @recorded_by_proxy - def test_agent_list(self, **kwargs): - # create client and ensure there are no previous agents - client = self.create_client(**kwargs) - list_length = client.agents.list_agents().data.__len__() - - # create agent and check that it appears in the list - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert client.agents.list_agents().data.__len__() == list_length + 1 - assert client.agents.list_agents().data[0].id == agent.id - - # create second agent and check that it appears in the list - agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent") - assert client.agents.list_agents().data.__len__() == list_length + 2 - assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id - - # delete agents and check list - client.agents.delete_agent(agent.id) - assert client.agents.list_agents().data.__len__() == list_length + 1 - assert client.agents.list_agents().data[0].id == agent2.id - - client.agents.delete_agent(agent2.id) - assert client.agents.list_agents().data.__len__() == list_length - print("Deleted agents") - - # close client - client.close() - """ - - # ********************************************************************************** - # - # HAPPY PATH SERVICE TESTS - Thread APIs - # - # ********************************************************************************** - - # test creating thread - @agentClientPreparer() - @recorded_by_proxy - def test_create_thread(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models - assert thread.id - print("Created thread, thread ID", thread.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test getting thread - @agentClientPreparer() - @recorded_by_proxy - def test_get_thread(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # get thread - thread2 = client.agents.get_thread(thread.id) - assert thread2.id - assert thread.id == thread2.id - print("Got thread, thread ID", thread2.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - """ - TODO what can I update a thread with? - # test updating thread - @agentClientPreparer() - @recorded_by_proxy - def test_update_thread(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # update thread - client.agents.update_thread(thread.id, ) # TODO what can we update it with? - assert not thread - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - """ - # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working - # status_code = 404, response = - # error_map = {304: , 401: , 409: } - - # test deleting thread - @agentClientPreparer() - @recorded_by_proxy - def test_delete_thread(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models - assert thread.id - print("Created thread, thread ID", thread.id) - - # delete thread - deletion_status = client.agents.delete_thread(thread.id) - # assert not thread - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - # # ********************************************************************************** - # # - # # HAPPY PATH SERVICE TESTS - Message APIs - # # - # # ********************************************************************************** - - # test creating message in a thread - @agentClientPreparer() - @recorded_by_proxy - def test_create_message(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - assert message.id - print("Created message, message ID", message.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test creating multiple messages in a thread - @agentClientPreparer() - @recorded_by_proxy - def test_create_multiple_messages(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create messages - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - assert message.id - print("Created message, message ID", message.id) - message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") - assert message2.id - print("Created message, message ID", message2.id) - message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") - assert message3.id - print("Created message, message ID", message3.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test listing messages in a thread - @agentClientPreparer() - @recorded_by_proxy - def test_list_messages(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # check that initial message list is empty - messages0 = client.agents.list_messages(thread_id=thread.id) - print(messages0.data) - assert messages0.data.__len__() == 0 - - # create messages and check message list for each one - message1 = client.agents.create_message(thread_id=thread.id, 
role="user", content="Hello, tell me a joke") - assert message1.id - print("Created message, message ID", message1.id) - messages1 = client.agents.list_messages(thread_id=thread.id) - assert messages1.data.__len__() == 1 - assert messages1.data[0].id == message1.id - - message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") - assert message2.id - print("Created message, message ID", message2.id) - messages2 = client.agents.list_messages(thread_id=thread.id) - assert messages2.data.__len__() == 2 - assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - - message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") - assert message3.id - print("Created message, message ID", message3.id) - messages3 = client.agents.list_messages(thread_id=thread.id) - assert messages3.data.__len__() == 3 - assert ( - messages3.data[0].id == message3.id - or messages3.data[1].id == message2.id - or messages3.data[2].id == message2.id - ) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test getting message in a thread - @agentClientPreparer() - @recorded_by_proxy - def test_get_message(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - assert message.id - print("Created message, message ID", message.id) - - # get message - message2 = client.agents.get_message(thread_id=thread.id, message_id=message.id) - assert message2.id - assert message.id == message2.id - print("Got message, message ID", message.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - """ - TODO format the updated body - # test updating message in a thread - @agentClientPreparer() - @recorded_by_proxy - def test_update_message(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - assert message.id - print("Created message, message ID", message.id) - - # update message - body_json = json.dumps # TODO format body into json -- figure out what the message looks like so I can update it (might be in that picture) - client.agents.update_message(thread_id=thread.id, message_id=message.id, body=) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - # # ********************************************************************************** - # # - # # HAPPY PATH SERVICE TESTS - Run APIs - # # - 
# # ********************************************************************************** - - # test creating run - @agentClientPreparer() - @recorded_by_proxy - def test_create_run(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test getting run - @agentClientPreparer() - @recorded_by_proxy - def test_get_run(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # get run - run2 = client.agents.get_run(thread_id=thread.id, run_id=run.id) - assert run2.id - assert run.id == run2.id - print("Got run, run ID", run2.id) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # TODO fix: this sometimes works and sometimes doesn't
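The flakiness flagged in the TODO above is usually a timing issue: a freshly created run can be observed in any transient state, so a test has to poll until a terminal state is reached. A minimal sketch of the polling loop the tests below repeat inline, assuming only the client.agents.get_run call they already use; wait_for_terminal_state and timeout_sec are illustrative names, not part of the SDK:

import time

TERMINAL_STATES = {"cancelled", "failed", "completed", "expired"}

def wait_for_terminal_state(client, thread_id: str, run_id: str, timeout_sec: float = 60.0):
    # Poll once per second until the run leaves its transient states or the timeout expires.
    # A run parked in "requires_action" cannot progress until tool outputs are submitted
    # (see the tool-outputs test further below), so the deadline guards against spinning forever.
    deadline = time.monotonic() + timeout_sec
    run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
    while run.status in ("queued", "in_progress", "requires_action") and time.monotonic() < deadline:
        time.sleep(1)
        run = client.agents.get_run(thread_id=thread_id, run_id=run_id)
    return run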
- # test successful run status TODO test for cancelled/unsuccessful runs - @agentClientPreparer() - @recorded_by_proxy - def test_run_status(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # check status - assert run.status in [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "expired", - ] - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - print("Run status:", run.status) - - assert run.status in ["cancelled", "failed", "completed", "expired"] - print("Run completed with status:", run.status) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - """ - # TODO another, but check that the number of runs decreases after cancelling runs - # TODO can each thread only support one run? - # test listing runs - @agentClientPreparer() - @recorded_by_proxy - def test_list_runs(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # check list for current runs - runs0 = client.agents.list_runs(thread_id=thread.id) - assert runs0.data.__len__() == 0 - - # create run and check list - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - runs1 = client.agents.list_runs(thread_id=thread.id) - assert runs1.data.__len__() == 1 - assert runs1.data[0].id == run.id - - # create second run - run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run2.id - print("Created run, run ID", run2.id) - runs2 = client.agents.list_runs(thread_id=thread.id) - assert runs2.data.__len__() == 2 - assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - """ - # TODO figure out what to update the run with - # test updating run - @agentClientPreparer() - @recorded_by_proxy - def test_update_run(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread =
client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # update run - body = json.dumps({'todo': 'placeholder'}) - client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body) - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - # test submitting tool outputs to run - @agentClientPreparer() - @recorded_by_proxy - def test_submit_tool_outputs_to_run(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # Initialize agent tools - functions = FunctionTool(user_functions_recording) - code_interpreter = CodeInterpreterTool() - - toolset = ToolSet() - toolset.add(functions) - toolset.add(code_interpreter) - - # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset - ) - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # check that tools are uploaded - assert run.tools - assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) - - # check status - assert run.status in [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "expired", - ] - while run.status in ["queued", "in_progress", "requires_action"]: - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - - # check if tools are needed - if run.status == "requires_action" and run.required_action.submit_tool_outputs: - print("Requires action: submit tool outputs") - tool_calls = run.required_action.submit_tool_outputs.tool_calls - if not tool_calls: - print( - "No tool calls provided - cancelling run" - ) # TODO how can i make sure that it wants tools? should i have some kind of error message? 
- client.agents.cancel_run(thread_id=thread.id, run_id=run.id) - break - - # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here - print("Tool outputs:", tool_outputs) - if tool_outputs: - client.agents.submit_tool_outputs_to_run( - thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs - ) - - print("Current run status:", run.status) - - print("Run completed with status:", run.status) - - # check that messages used the tool - messages = client.agents.list_messages(thread_id=thread.id, run_id=run.id) - tool_message = messages["data"][0]["content"][0]["text"]["value"] - hour24 = time.strftime("%H") - hour12 = time.strftime("%I") - minute = time.strftime("%M") - assert hour12 + ":" + minute in tool_message or hour24 + ":" + minute in tool_message - print("Used tool_outputs") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - """ - # DISABLED: rewrite to ensure run is not complete when cancel_run is called - # test cancelling run - @agentClientPreparer() - @recorded_by_proxy - def test_cancel_run(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - # check status and cancel - assert run.status in ["queued", "in_progress", "requires_action"] - client.agents.cancel_run(thread_id=thread.id, run_id=run.id) - - while run.status in ["queued", "cancelling"]: - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - print("Current run status:", run.status) - assert run.status == "cancelled" - print("Run cancelled") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - """ - - # test create thread and run - @agentClientPreparer() - @recorded_by_proxy - def test_create_thread_and_run(self, **kwargs): - time.sleep(26) - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread and run - run = client.agents.create_thread_and_run(assistant_id=agent.id) - assert run.id - assert run.thread_id - print("Created run, run ID", run.id) - - # get thread - thread = client.agents.get_thread(run.thread_id) - assert thread.id - print("Created thread, thread ID", thread.id) - - # check status - assert run.status in [ - "queued", - "in_progress", - "requires_action", - "cancelling", - "cancelled", - "failed", - "completed", - "expired", - ] - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - # assert run.status in
["queued", "in_progress", "requires_action", "completed"] - print("Run status:", run.status) - - assert run.status == "completed" - print("Run completed") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test listing run steps - @agentClientPreparer() - @recorded_by_proxy - def test_list_run_step(self, **kwargs): - - time.sleep(50) - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - # commenting assertion out below, do we know exactly when run starts? - # assert steps['data'].__len__() == 0 - - # check status - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - print("Run status:", run.status) - steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - assert steps["data"].__len__() > 0 # TODO what else should we look at? - - assert run.status == "completed" - print("Run completed") - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # test getting run step - # TODO where are step ids from - @agentClientPreparer() - @recorded_by_proxy - def test_get_run_step(self, **kwargs): - # create client - client = self.create_client(**kwargs) - assert isinstance(client, AIProjectClient) - - # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - assert agent.id - print("Created agent, agent ID", agent.id) - - # create thread - thread = client.agents.create_thread() - assert thread.id - print("Created thread, thread ID", thread.id) - - # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, can you tell me a joke?" 
- ) - assert message.id - print("Created message, message ID", message.id) - - # create run - run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) - assert run.id - print("Created run, run ID", run.id) - - if run.status == "failed": - assert run.last_error - print(run.last_error) - print("FAILED HERE") - - # check status - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - while run.status in ["queued", "in_progress", "requires_action"]: - # wait for a second - time.sleep(1) - run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - if run.status == "failed": - assert run.last_error - print(run.last_error) - print("FAILED HERE") - assert run.status in ["queued", "in_progress", "requires_action", "completed"] - print("Run status:", run.status) - - # list steps, check that get_run_step works with first step_id - steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) - assert steps["data"].__len__() > 0 - step = steps["data"][0] - get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) - assert step == get_step - - # delete agent and close client - client.agents.delete_agent(agent.id) - print("Deleted agent") - client.close() - - # # ********************************************************************************** - # # - # # HAPPY PATH SERVICE TESTS - Streaming APIs - # # - # # ********************************************************************************** - - # # ********************************************************************************** - # # - # # NEGATIVE TESTS - TODO decide what goes here - # # - # # ********************************************************************************** - - """ - # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors - # test agent creation and deletion - @agentClientPreparer() - @recorded_by_proxy - def test_negative_create_delete_agent(self, **kwargs): - # create client using bad endpoint - bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm" - - credential = self.get_credential(AIProjectClient, is_async=False) - client = AIProjectClient.from_connection_string( - credential=credential, - connection=bad_connection_string, - ) - - # attempt to create agent with bad client - exception_caught = False - try: - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") - # check for error (will not have a status code since it failed on request -- no response was received) - except (ServiceRequestError, HttpResponseError) as e: - exception_caught = True - if type(e) == ServiceRequestError: - assert e.message - assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower() - else: - assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e) - - # close client and confirm an exception was caught - client.close() - assert exception_caught - """ diff --git a/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py deleted file mode 100644 index afb2c5de9501..000000000000 --- a/sdk/ai/azure-ai-project/tests/agents/test_deserialization.py +++ /dev/null @@ -1,92 +0,0 @@ -# # ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License.
-# ------------------------------------ -import copy -import datetime -import pytest - -from azure.ai.project.models._models import ThreadRun, RunStep, ThreadMessage -from azure.ai.project.models._patch import _safe_instantiate, _filter_parameters - -class TestDeserialization: - """Tests for deserialization of SSE responses.""" - - @pytest.mark.parametrize( - "valid_params,model_cls", - [ - ( - { - "id": "12345", - "object": "thread.run", - "thread_id": "6789", - "assistant_id": "101112", - "status": "in_progress", - "required_action": "test", - "last_error": "none", - "model": "gpt-4", - "instructions": "Test instruction", - "tools": "Test function", - "created_at": datetime.datetime(2024, 11, 14), - "expires_at": datetime.datetime(2024, 11, 17), - "started_at": datetime.datetime(2024, 11, 15), - "completed_at": datetime.datetime(2024, 11, 16), - "cancelled_at": datetime.datetime(2024, 11, 16), - "failed_at": datetime.datetime(2024, 11, 16), - "incomplete_details": "max_completion_tokens", - "usage": "in_progress", - "temperature": 1.0, - "top_p": 1.0, - "max_completion_tokens": 1000, - "truncation_strategy": "test", - "tool_choice": "tool name", - "response_format": "json", - "metadata": {"foo": "bar"}, - "tool_resources": "test", - "parallel_tool_calls": True, - }, - ThreadRun, - ), - ( - { - "id": "1233", - "object": "thread.message", - "created_at": datetime.datetime(2024, 11, 14), - "thread_id": "5678", - "status": "incomplete", - "incomplete_details": "test", - "completed_at": datetime.datetime(2024, 11, 16), - "incomplete_at": datetime.datetime(2024, 11, 16), - "role": "assistant", - "content": "Test", - "assistant_id": "9911", - "run_id": "11", - "attachments": ["4", "8", "15", "16", "23", "42"], - "metadata": {"foo", "bar"}, - }, - ThreadMessage, - ), - ], - ) - def test_correct_thread_params(self, valid_params, model_cls): - """Test that if the service returned an extra parameter in the SSE response, it does not create issues.""" - - bad_params = {"foo": "bar"} - params = copy.deepcopy(valid_params) - params.update(bad_params) - # We should not be able to create a Thread Run with bad parameters. - with pytest.raises(TypeError): - model_cls(**params) - filtered_params = _filter_parameters(model_cls, params) - for k in valid_params: - assert k in filtered_params - for k in bad_params: - assert k not in filtered_params - # Implicitly check that we can create object with the filtered parameters. - model_cls(**filtered_params) - # Check safe initialization. - assert isinstance(_safe_instantiate(model_cls, params), model_cls) - - def test_safe_instantiate_non_dict(self): - """Test the _safe_instantiate method when the user supplies something other than a dictionary.""" - assert _safe_instantiate(RunStep, 42) == 42 diff --git a/sdk/ai/azure-ai-project/tests/conftest.py b/sdk/ai/azure-ai-project/tests/conftest.py deleted file mode 100644 index d944cdf86007..000000000000 --- a/sdk/ai/azure-ai-project/tests/conftest.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License.
-# ------------------------------------ - -import pytest -from devtools_testutils import test_proxy, remove_batch_sanitizers - - -# autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method -@pytest.fixture(scope="session", autouse=True) -def start_proxy(test_proxy): - return - - -@pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy): - # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: - # - AZSDK3493: $..name - remove_batch_sanitizers(["AZSDK3493"]) diff --git a/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py deleted file mode 100644 index d2b4177f8a1e..000000000000 --- a/sdk/ai/azure-ai-project/tests/connections/connection_test_base.py +++ /dev/null @@ -1,40 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import sys -import logging -import functools -from azure.ai.project import AIProjectClient -from azure.identity import DefaultAzureCredential -from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader - -ServicePreparerChatCompletions = functools.partial( - EnvironmentVariableLoader, - "project_connection_string", - project_connection_string_connections_tests="endpoint;azure-subscription-id;azure-rg-name;ai-studio-hub-name", -) - -# Set to True to enable SDK logging -LOGGING_ENABLED = True - -if LOGGING_ENABLED: - # Create a logger for the 'azure' SDK - # See https://docs.python.org/3/library/logging.html - logger = logging.getLogger("azure") - logger.setLevel(logging.DEBUG) # INFO or DEBUG - - # Configure a console output - handler = logging.StreamHandler(stream=sys.stdout) - logger.addHandler(handler) - -class ConnectionsTestBase: - - def get_sync_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("project_connection_string_connections_tests") - project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=conn_str, - ) - return project_client - diff --git a/sdk/ai/azure-ai-project/tests/connections/test_connections.py b/sdk/ai/azure-ai-project/tests/connections/test_connections.py deleted file mode 100644 index b24558903416..000000000000 --- a/sdk/ai/azure-ai-project/tests/connections/test_connections.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ -import sys -import logging -import datetime - -from azure.ai.project.models import SASTokenCredential -from azure.core.credentials import TokenCredential, AccessToken -from azure.core.exceptions import HttpResponseError - -from connection_test_base import ConnectionsTestBase - - -# The test class name needs to start with "Test" to get collected by pytest -class TestConnections(ConnectionsTestBase): - - def test_get_connection(self, **kwargs): - project_client = self.get_sync_client(**kwargs) - pass - - def test_get_default_connection(self, **kwargs): - pass - - def test_list_connections(self, **kwargs): - pass \ No newline at end of file diff --git a/sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py deleted file mode 100644 index 9199a91067f0..000000000000 --- a/sdk/ai/azure-ai-project/tests/connections/test_connections_unit_tests.py +++ /dev/null @@ -1,98 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import datetime -from azure.ai.project.models import SASTokenCredential -from azure.core.credentials import TokenCredential, AccessToken -from azure.core.exceptions import HttpResponseError -from connection_test_base import ConnectionsTestBase - -class FakeTokenCredential(TokenCredential): - def get_token(self, *scopes, **kwargs): - # Create a fake token with an expiration time - token = "fake_token" - expires_on = datetime.datetime.now() + datetime.timedelta(hours=1) - return AccessToken(token, expires_on.timestamp()) - - -# The test class name needs to start with "Test" to get collected by pytest -class TestConnectionsUnitTests(ConnectionsTestBase): - - # ********************************************************************************** - # - # UNIT TESTS - # - # ********************************************************************************** - - def test_sas_token_credential_class_mocked(self, **kwargs): - import jwt - import datetime - import time - - # Create a simple JWT with a 5 second expiration time - token_duration_sec = 5 - secret_key = "my_secret_key" - sas_token_expiration: datetime.datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( - seconds=token_duration_sec - ) - sas_token_expiration = sas_token_expiration.replace(microsecond=0) - payload = {"exp": sas_token_expiration} - sas_token = jwt.encode(payload, secret_key) - - # You can parse the token string on https://jwt.ms/. The "exp" value there is the - # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC). - # See https://www.epochconverter.com/ to convert Unix time to readable date & time.
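For context on what the SASTokenCredential under test has to do with such a token: a JWT payload is just base64url-encoded JSON (the second dot-separated segment), so the "exp" claim can be read without verifying the signature. A minimal sketch of that decoding using only the standard library; jwt_expiry is an illustrative helper, not the SDK's implementation (PyJWT's jwt.decode(token, options={"verify_signature": False}) would yield the same claims):

import base64
import datetime
import json

def jwt_expiry(token: str) -> datetime.datetime:
    # The payload is the second of the three dot-separated JWT segments.
    payload_b64 = token.split(".")[1]
    # base64url encoders strip the "=" padding; restore it before decoding.
    payload_b64 += "=" * (-len(payload_b64) % 4)
    claims = json.loads(base64.urlsafe_b64decode(payload_b64))
    return datetime.datetime.fromtimestamp(claims["exp"], datetime.timezone.utc)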
- # The base64 decoded string will look something like this: - # { - # "alg": "HS256", - # "typ": "JWT" - # }.{ - # "exp": 1727208894 - # }.[Signature] - print(f"Generated JWT token: {sas_token}") - - sas_token_credential = SASTokenCredential( - sas_token=sas_token, - credential=FakeTokenCredential(), - subscription_id="fake_subscription_id", - resource_group_name="fake_resouce_group", - project_name="fake_project_name", - connection_name="fake_connection_name", - ) - assert sas_token_credential._expires_on == sas_token_expiration - - exception_caught = False - try: - for _ in range(token_duration_sec + 2): - print("Looping...") - time.sleep(1) - access_token = sas_token_credential.get_token() - except HttpResponseError as e: - exception_caught = True - print(e) - assert exception_caught - - # Unit tests for the SASTokenCredential class - def test_sas_token_credential_class_real(self, **kwargs): - - # Example of real SAS token for AOAI service. You can parse it on https://jwt.ms/. The "exp" value there is the - # token expiration time in Unix timestamp format (seconds since 1970-01-01 00:00:00 UTC) - token = "eyJhbGciOiJFUzI1NiIsImtpZCI6ImtleTEiLCJ0eXAiOiJKV1QifQ.eyJyZWdpb24iOiJlYXN0dXMyZXVhcCIsInN1YnNjcmlwdGlvbi1pZCI6IjQyZjVlYWFjMjc5MDRiMGViMDI4ZTVkZjcyYzg5ZDAxIiwicHJvZHVjdC1pZCI6Ik9wZW5BSS5TMCIsImNvZ25pdGl2ZS1zZXJ2aWNlcy1lbmRwb2ludCI6Imh0dHBzOi8vYXBpLmNvZ25pdGl2ZS5taWNyb3NvZnQuY29tL2ludGVybmFsL3YxLjAvIiwiYXp1cmUtcmVzb3VyY2UtaWQiOiIvc3Vic2NyaXB0aW9ucy84ZjMzOGY2ZS00ZmNlLTQ0YWUtOTY5Yy1mYzdkOGZkYTAzMGUvcmVzb3VyY2VHcm91cHMvYXJncnlnb3JfY2FuYXJ5L3Byb3ZpZGVycy9NaWNyb3NvZnQuQ29nbml0aXZlU2VydmljZXMvYWNjb3VudHMvYXJncnlnb3ItY2FuYXJ5LWFvYWkiLCJzY29wZSI6Imh0dHBzOi8vc3BlZWNoLnBsYXRmb3JtLmJpbmcuY29tIiwiYXVkIjoidXJuOm1zLnNwZWVjaCIsImV4cCI6MTcyNjc4MjI0NiwiaXNzIjoidXJuOm1zLmNvZ25pdGl2ZXNlcnZpY2VzIn0.L7VvsXPzbwHQeMS-o9Za4itkU6uP4-KFMyOpTsYD9tpIJa_qChMHDl8FHy5n7K5L1coKg8sJE6LlJICFdU1ALQ" - expiration_date_linux_time = 1726782246 # Value of "exp" field in the token. 
See https://www.epochconverter.com/ to convert to date & time - expiration_datatime_utc = datetime.datetime.fromtimestamp(expiration_date_linux_time, datetime.timezone.utc) - print(f"\n[TEST] Expected expiration date: {expiration_datatime_utc}") - - sas_token_credential = SASTokenCredential( - sas_token=token, - credential=None, - subscription_id=None, - resource_group_name=None, - project_name=None, - connection_name=None, - ) - - print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") - assert sas_token_credential._expires_on == expiration_datatime_utc - diff --git a/sdk/ai/azure-ai-project/tsp-location.yaml b/sdk/ai/azure-ai-project/tsp-location.yaml deleted file mode 100644 index 069a46a40fd5..000000000000 --- a/sdk/ai/azure-ai-project/tsp-location.yaml +++ /dev/null @@ -1,4 +0,0 @@ -directory: specification/ai/Azure.AI.Project -commit: 1fbee44ffdb76894e51943754f374cb210f75e11 -repo: Azure/azure-rest-api-specs -additionalDirectories: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py index 971602b90fb4..ab9ed29d4c3e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -16,7 +16,7 @@ from ._configuration import AIProjectClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, DiagnosticsOperations, EvaluationsOperations if TYPE_CHECKING: from azure.core.credentials import TokenCredential @@ -29,6 +29,8 @@ class AIProjectClient: :vartype agents: azure.ai.projects.operations.AgentsOperations :ivar connections: ConnectionsOperations operations :vartype connections: azure.ai.projects.operations.ConnectionsOperations + :ivar diagnostics: DiagnosticsOperations operations + :vartype diagnostics: azure.ai.projects.operations.DiagnosticsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.projects.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -93,6 +95,7 @@ def __init__( self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 53c3c5b6697b..3639241d9dae 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -16,7 +16,7 @@ from azure.core.pipeline import policies from ._configuration import AIProjectClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, DiagnosticsOperations from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations @@ 
-48,11 +48,46 @@ def __init__( if "credential_scopes" in kwargs: raise ValueError("No support for overriding the credential scopes") + kwargs0 = kwargs.copy() kwargs1 = kwargs.copy() kwargs2 = kwargs.copy() kwargs3 = kwargs.copy() - # For Endpoints operations (enumerating connections, getting SAS tokens) + # For getting AppInsights connection string from the AppInsights resource. + # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have + # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} + _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long + self._config0 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2020-02-02", + credential_scopes=["https://management.azure.com"], + **kwargs0, + ) + + _policies0 = kwargs0.pop("policies", None) + if _policies0 is None: + _policies0 = [ + policies.RequestIdPolicy(**kwargs0), + self._config0.headers_policy, + self._config0.user_agent_policy, + self._config0.proxy_policy, + policies.ContentDecodePolicy(**kwargs0), + self._config0.redirect_policy, + self._config0.retry_policy, + self._config0.authentication_policy, + self._config0.custom_hook_policy, + self._config0.logging_policy, + policies.DistributedTracingPolicy(**kwargs0), + policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, + self._config0.http_logging_policy, + ] + self._client0 = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) + + # For Endpoints operations (listing connections, getting connection properties, getting project properties) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config1 = AIProjectClientConfiguration( endpoint=endpoint, @@ -149,23 +184,29 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False + self.diagnostics = DiagnosticsOperations( + self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self + ) self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) self.inference = InferenceOperations(self) def close(self) -> None: + self._client0.close() self._client1.close() self._client2.close() self._client3.close() def __enter__(self) -> Self: + self._client0.__enter__() self._client1.__enter__() self._client2.__enter__() self._client3.__enter__() return self def __exit__(self, *exc_details: Any) -> None: + self._client0.__exit__(*exc_details) self._client1.__exit__(*exc_details) self._client2.__exit__(*exc_details) self._client3.__exit__(*exc_details) @@ -203,7 +244,8 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: from azure.ai.ml.constants import AssetTypes except ImportError: raise ImportError( - "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`") + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" + ) data = Data( path=file_path, @@ -232,6 +274,7 @@ def scope(self) -> Dict[str, str]: "project_name": self._config3.project_name, } + __all__: List[str] = [ "AIProjectClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py index 4608fc35e6b8..2e9f614d420e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py @@ -16,7 +16,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import AIProjectClientConfiguration -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, DiagnosticsOperations, EvaluationsOperations if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -29,6 +29,8 @@ class AIProjectClient: :vartype agents: azure.ai.projects.aio.operations.AgentsOperations :ivar connections: ConnectionsOperations operations :vartype connections: azure.ai.projects.aio.operations.ConnectionsOperations + :ivar diagnostics: DiagnosticsOperations operations + :vartype diagnostics: azure.ai.projects.aio.operations.DiagnosticsOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.projects.aio.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -93,6 +95,7 @@ def __init__( self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) + self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index d1a7e6d84569..5633cd0ddc22 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -13,7 +13,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import AIProjectClientConfiguration -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, DiagnosticsOperations from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations @@ -45,10 +45,45 @@ def __init__( if "credential_scopes" in kwargs: raise ValueError("No support for overriding the credential scopes") + kwargs0 = kwargs.copy() kwargs1 = kwargs.copy() kwargs2 = kwargs.copy() kwargs3 = kwargs.copy() + # For getting AppInsights connection string from the AppInsights resource. + # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. 
It will have + # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} + _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long + self._config0 = AIProjectClientConfiguration( + endpoint=endpoint, + subscription_id=subscription_id, + resource_group_name=resource_group_name, + project_name=project_name, + credential=credential, + api_version="2020-02-02", + credential_scopes=["https://management.azure.com"], + **kwargs0, + ) + + _policies0 = kwargs0.pop("policies", None) + if _policies0 is None: + _policies0 = [ + policies.RequestIdPolicy(**kwargs0), + self._config0.headers_policy, + self._config0.user_agent_policy, + self._config0.proxy_policy, + policies.ContentDecodePolicy(**kwargs0), + self._config0.redirect_policy, + self._config0.retry_policy, + self._config0.authentication_policy, + self._config0.custom_hook_policy, + self._config0.logging_policy, + policies.DistributedTracingPolicy(**kwargs0), + policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, + self._config0.http_logging_policy, + ] + self._client0 = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) + # For Endpoints operations (enumerating connections, getting SAS tokens) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config1 = AIProjectClientConfiguration( @@ -146,23 +181,29 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False + self.diagnostics = DiagnosticsOperations( + self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self + ) self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) self.agents = AgentsOperations(self._client2, self._config2, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client3, self._config3, self._serialize, self._deserialize) self.inference = InferenceOperations(self) async def close(self) -> None: + await self._client0.close() await self._client1.close() await self._client2.close() await self._client3.close() async def __aenter__(self) -> Self: + await self._client0.__aenter__() await self._client1.__aenter__() await self._client2.__aenter__() await self._client3.__aenter__() return self async def __aexit__(self, *exc_details: Any) -> None: + await self._client0.__aexit__(*exc_details) await self._client1.__aexit__(*exc_details) await self._client2.__aexit__(*exc_details) await self._client3.__aexit__(*exc_details) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py index 35cf92df96bc..d6c4708ca4c0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py @@ -14,6 +14,7 @@ from ._operations import AgentsOperations # type: ignore from ._operations import ConnectionsOperations # type: ignore +from ._operations import DiagnosticsOperations # type: ignore from ._operations import EvaluationsOperations # type: ignore from ._patch import __all__ as _patch_all @@ -23,6 +24,7 @@ __all__ = [ "AgentsOperations", "ConnectionsOperations", + 
"DiagnosticsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 1279baa85261..7b0a6dd2a28a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -73,9 +73,11 @@ build_agents_update_run_request, build_agents_update_thread_request, build_agents_upload_file_request, - build_connections_get_request, - build_connections_list_request, - build_connections_list_secrets_request, + build_connections_get_connection_request, + build_connections_get_connection_with_secrets_request, + build_connections_get_workspace_request, + build_connections_list_connections_request, + build_diagnostics_get_app_insights_request, build_evaluations_create_or_replace_schedule_request, build_evaluations_create_request, build_evaluations_delete_schedule_request, @@ -4965,14 +4967,78 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def _list( + async def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceResponse: + """Gets the properties of the specified machine learning workspace. + + :return: GetWorkspaceResponse. The GetWorkspaceResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetWorkspaceResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.GetWorkspaceResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_workspace_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.GetWorkspaceResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + 
+ return deserialized # type: ignore + + @distributed_trace_async + async def _list_connections( self, *, category: Optional[Union[str, _models.ConnectionType]] = None, include_all: Optional[bool] = None, target: Optional[str] = None, **kwargs: Any - ) -> _models._models.ConnectionsListResponse: + ) -> _models._models.ListConnectionsResponse: """List the details of all the connections (not including their credentials). :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", @@ -4983,8 +5049,8 @@ async def _list( :paramtype include_all: bool :keyword target: Target of the workspace connection. Default value is None. :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListResponse + :return: ListConnectionsResponse. The ListConnectionsResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.ListConnectionsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -4998,9 +5064,9 @@ async def _list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ListConnectionsResponse] = kwargs.pop("cls", None) - _request = build_connections_list_request( + _request = build_connections_list_connections_request( category=category, include_all=include_all, target=target, @@ -5038,7 +5104,7 @@ async def _list( deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + _models._models.ListConnectionsResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -5047,14 +5113,13 @@ async def _list( return deserialized # type: ignore @distributed_trace_async - async def _get(self, connection_name: str, **kwargs: Any) -> _models._models.ConnectionsListSecretsResponse: + async def _get_connection(self, connection_name: str, **kwargs: Any) -> _models._models.GetConnectionResponse: """Get the details of a single connection, without credentials. :param connection_name: Connection Name. Required. :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :return: GetConnectionResponse. 
The GetConnectionResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetConnectionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -5068,9 +5133,9 @@ async def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Con _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetConnectionResponse] = kwargs.pop("cls", None) - _request = build_connections_get_request( + _request = build_connections_get_connection_request( connection_name=connection_name, api_version=self._config.api_version, headers=_headers, @@ -5106,7 +5171,7 @@ async def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Con deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + _models._models.GetConnectionResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -5115,22 +5180,22 @@ async def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Con return deserialized # type: ignore @overload - async def _list_secrets( + async def _get_connection_with_secrets( self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... @overload - async def _list_secrets( + async def _get_connection_with_secrets( self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... @overload - async def _list_secrets( + async def _get_connection_with_secrets( self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... @distributed_trace_async - async def _list_secrets( + async def _get_connection_with_secrets( self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: + ) -> _models._models.GetConnectionResponse: """Get the details of a single connection, including credentials (if available). :param connection_name: Connection Name. Required. @@ -5139,9 +5204,8 @@ async def _list_secrets( :type body: JSON or IO[bytes] :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :return: GetConnectionResponse. 
The GetConnectionResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetConnectionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -5156,7 +5220,7 @@ async def _list_secrets( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetConnectionResponse] = kwargs.pop("cls", None) if body is _Unset: if ignored is _Unset: @@ -5170,7 +5234,7 @@ async def _list_secrets( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_connections_list_secrets_request( + _request = build_connections_get_connection_with_secrets_request( connection_name=connection_name, content_type=content_type, api_version=self._config.api_version, @@ -5208,7 +5272,7 @@ async def _list_secrets( deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + _models._models.GetConnectionResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -5217,6 +5281,93 @@ async def _list_secrets( return deserialized # type: ignore +class DiagnosticsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.aio.AIProjectClient`'s + :attr:`diagnostics` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _models.GetAppInsightsResponse: + # pylint: disable=line-too-long + """Gets the properties of the specified Application Insights resource. + + :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the + format: + ``/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resource-name}``. + Required. + :type app_insights_resource_url: str + :return: GetAppInsightsResponse. 
The GetAppInsightsResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.GetAppInsightsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.GetAppInsightsResponse] = kwargs.pop("cls", None) + + _request = build_diagnostics_get_app_insights_request( + app_insights_resource_url=app_insights_resource_url, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.GetAppInsightsResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + class EvaluationsOperations: """ .. warning:: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 7ef50e724fba..e046109ac356 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -18,9 +17,15 @@ from azure.ai.projects import _types from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated +from ._operations import DiagnosticsOperations as DiagnosticsOperationsGenerated from ...models._patch import ConnectionProperties from ...models._enums import AuthenticationType, ConnectionType, FilePurpose -from ...models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from ...models._models import ( + GetConnectionResponse, + ListConnectionsResponse, + GetAppInsightsResponse, + GetWorkspaceResponse, +) from ... 
import models as _models from azure.core.tracing.decorator_async import distributed_trace_async @@ -33,7 +38,7 @@ class InferenceOperations: def __init__(self, outer_instance): - self.outer_instance = outer_instance + self._outer_instance = outer_instance @distributed_trace_async async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": @@ -46,7 +51,7 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = await self.outer_instance.connections.get_default( + connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: @@ -98,7 +103,7 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = await self.outer_instance.connections.get_default( + connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: @@ -138,16 +143,21 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace_async - async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI": + async def get_azure_openai_client(self, *, api_version: str | None = None, **kwargs) -> "AsyncAzureOpenAI": """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. + See "Data plane - Inference" row in the table at + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs. If this keyword + is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. + :paramtype api_version: str :return: An authenticated AsyncAzureOpenAI client :rtype: ~openai.AsyncAzureOpenAI :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = await self.outer_instance.connections.get_default( + connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs ) if not connection: @@ -158,16 +168,12 @@ async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI": except ModuleNotFoundError as _: raise ModuleNotFoundError("OpenAI SDK is not installed. 
Please install it using 'pip install openai'")

-        # Pick latest GA version from the "Data plane - Inference" row in the table
-        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
-        AZURE_OPENAI_API_VERSION = "2024-06-01"
-
         if connection.authentication_type == AuthenticationType.API_KEY:
             logger.debug(
                 "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication"
             )
             client = AsyncAzureOpenAI(
-                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION
+                api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version
             )
         elif connection.authentication_type == AuthenticationType.AAD:
             logger.debug(
@@ -185,7 +191,7 @@ async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
                     connection.token_credential, "https://cognitiveservices.azure.com/.default"
                 ),
                 azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
+                api_version=api_version,
             )
         elif connection.authentication_type == AuthenticationType.SAS:
             logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication")
@@ -194,7 +200,7 @@ async def get_azure_openai_client(self, **kwargs) -> "AsyncAzureOpenAI":
                     connection.token_credential, "https://cognitiveservices.azure.com/.default"
                 ),
                 azure_endpoint=connection.endpoint_url,
-                api_version=AZURE_OPENAI_API_VERSION,
+                api_version=api_version,
             )
         else:
             raise ValueError("Unknown authentication type")
@@ -252,7 +258,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k
         if not connection_name:
             raise ValueError("Connection name cannot be empty")
         if with_credentials:
-            connection: ConnectionsListSecretsResponse = await self._list_secrets(
+            connection: GetConnectionResponse = await self._get_connection_with_secrets(
                 connection_name=connection_name, ignored="ignore", **kwargs
             )
             if connection.properties.auth_type == AuthenticationType.AAD:
@@ -272,7 +278,9 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k
             return ConnectionProperties(connection=connection)

         else:
-            return ConnectionProperties(connection=await self._get(connection_name=connection_name, **kwargs))
+            return ConnectionProperties(
+                connection=await self._get_connection(connection_name=connection_name, **kwargs)
+            )

     @distributed_trace_async
     async def list(
@@ -288,7 +296,7 @@ async def list(
         :raises ~azure.core.exceptions.HttpResponseError:
         """
         kwargs.setdefault("merge_span", True)
-        connections_list: ConnectionsListResponse = await self._list(
+        connections_list: ListConnectionsResponse = await self._list_connections(
             include_all=True, category=connection_type, **kwargs
         )

@@ -300,6 +308,47 @@ async def list(
         return connection_properties_list


+class DiagnosticsOperations(DiagnosticsOperationsGenerated):
+
+    connection_string: Optional[str] = None
+    """ Application Insights connection string. Call `enable()` to populate this property. """
+
+    def __init__(self, *args, **kwargs):
+        self._outer_instance = kwargs.pop("outer_instance")
+        super().__init__(*args, **kwargs)
+
+    @distributed_trace_async
+    async def enable(self, **kwargs) -> bool:
+        """Enable Application Insights tracing.
+        This method makes service calls to get the properties of the Application Insights resource
+        connected to the Azure AI Studio Project. If Application Insights was not enabled for this project,
+        this method will return False. Otherwise, it will return True. In this case the Application Insights
+        connection string can be accessed via the `.diagnostics.connection_string` property.
+
+        :return: True if Application Insights tracing was enabled. False otherwise.
+        :rtype: bool
+        """
+
+        if not self.connection_string:
+            # Get the AI Studio Project properties
+            get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace()
+
+            # No Application Insights resource was enabled for this Project
+            if not get_workspace_response.properties.application_insights:
+                return False
+
+            app_insights_response: GetAppInsightsResponse = await self.get_app_insights(
+                app_insights_resource_url=get_workspace_response.properties.application_insights
+            )
+
+            if not app_insights_response.properties.connection_string:
+                raise ValueError("Application Insights resource does not have a connection string")
+
+            self.connection_string = app_insights_response.properties.connection_string
+
+        return True
+
+
 class AgentsOperations(AgentsOperationsGenerated):

     @overload
@@ -1964,6 +2013,7 @@ async def create_vector_store_file_batch_and_poll(
 __all__: List[str] = [
     "AgentsOperations",
     "ConnectionsOperations",
+    "DiagnosticsOperations",
     "InferenceOperations",
 ]  # Add all objects you want publicly available to users at this package level
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
index f6ed04e4637b..933a62c1e5b6 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
@@ -20,6 +20,7 @@
     AgentThreadCreationOptions,
     AgentsApiResponseFormat,
     AgentsNamedToolChoice,
+    AppInsightsProperties,
     ApplicationInsightsConfiguration,
     AzureAISearchResource,
     AzureAISearchToolDefinition,
@@ -42,6 +43,7 @@
     FunctionDefinition,
     FunctionName,
     FunctionToolDefinition,
+    GetAppInsightsResponse,
     IndexResource,
     InputData,
     MessageAttachment,
@@ -203,6 +205,7 @@
     "AgentThreadCreationOptions",
     "AgentsApiResponseFormat",
     "AgentsNamedToolChoice",
+    "AppInsightsProperties",
     "ApplicationInsightsConfiguration",
     "AzureAISearchResource",
     "AzureAISearchToolDefinition",
@@ -225,6 +228,7 @@
     "FunctionDefinition",
     "FunctionName",
     "FunctionToolDefinition",
+    "GetAppInsightsResponse",
     "IndexResource",
     "InputData",
     "MessageAttachment",
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py
index 27241a07547e..35470d11723c 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py
@@ -369,6 +369,35 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


+class AppInsightsProperties(_model_base.Model):
+    """The properties of the Application Insights resource.
+
+
+    :ivar connection_string: The connection string of the Application Insights resource. Required.
+    :vartype connection_string: str
+    """
+
+    connection_string: str = rest_field(name="ConnectionString")
+    """The connection string of the Application Insights resource. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        connection_string: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class InputData(_model_base.Model): """Abstract data class for input data configuration. @@ -795,38 +824,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ConnectionsListResponse(_model_base.Model): - """Response from the list operation. - - - :ivar value: A list of connection list secrets. Required. - :vartype value: list[~azure.ai.projects.models._models.ConnectionsListSecretsResponse] - """ - - value: List["_models._models.ConnectionsListSecretsResponse"] = rest_field() - """A list of connection list secrets. Required.""" - - -class ConnectionsListSecretsResponse(_model_base.Model): - """Response from the listSecrets operation. - - - :ivar id: A unique identifier for the connection. Required. - :vartype id: str - :ivar name: The name of the resource. Required. - :vartype name: str - :ivar properties: The properties of the resource. Required. - :vartype properties: ~azure.ai.projects.models._models.ConnectionProperties - """ - - id: str = rest_field() - """A unique identifier for the connection. Required.""" - name: str = rest_field() - """The name of the resource. Required.""" - properties: "_models._models.ConnectionProperties" = rest_field() - """The properties of the resource. Required.""" - - class CredentialsApiKeyAuth(_model_base.Model): """The credentials needed for API key authentication. @@ -1458,6 +1455,85 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="function", **kwargs) +class GetAppInsightsResponse(_model_base.Model): + """Response from getting properties of the Application Insights resource. + + + :ivar id: A unique identifier for the resource. Required. + :vartype id: str + :ivar name: The name of the resource. Required. + :vartype name: str + :ivar properties: The properties of the resource. Required. + :vartype properties: ~azure.ai.projects.models.AppInsightsProperties + """ + + id: str = rest_field() + """A unique identifier for the resource. Required.""" + name: str = rest_field() + """The name of the resource. Required.""" + properties: "_models.AppInsightsProperties" = rest_field() + """The properties of the resource. Required.""" + + @overload + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + name: str, + properties: "_models.AppInsightsProperties", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class GetConnectionResponse(_model_base.Model): + """Response from the listSecrets operation. + + + :ivar id: A unique identifier for the connection. Required. + :vartype id: str + :ivar name: The name of the resource. Required. + :vartype name: str + :ivar properties: The properties of the resource. Required. + :vartype properties: ~azure.ai.projects.models._models.ConnectionProperties + """ + + id: str = rest_field() + """A unique identifier for the connection. Required.""" + name: str = rest_field() + """The name of the resource. Required.""" + properties: "_models._models.ConnectionProperties" = rest_field() + """The properties of the resource. Required.""" + + +class GetWorkspaceResponse(_model_base.Model): + """Response from the Workspace - Get operation. 
+
+
+    :ivar id: A unique identifier for the resource. Required.
+    :vartype id: str
+    :ivar name: The name of the resource. Required.
+    :vartype name: str
+    :ivar properties: The properties of the resource. Required.
+    :vartype properties: ~azure.ai.projects.models._models.WorkspaceProperties
+    """
+
+    id: str = rest_field()
+    """A unique identifier for the resource. Required."""
+    name: str = rest_field()
+    """The name of the resource. Required."""
+    properties: "_models._models.WorkspaceProperties" = rest_field()
+    """The properties of the resource. Required."""
+
+
 class IndexResource(_model_base.Model):
     """An Index resource.

@@ -1493,6 +1569,18 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)


+class ListConnectionsResponse(_model_base.Model):
+    """Response from the list operation.
+
+
+    :ivar value: A list of connections. Required.
+    :vartype value: list[~azure.ai.projects.models._models.GetConnectionResponse]
+    """
+
+    value: List["_models._models.GetConnectionResponse"] = rest_field()
+    """A list of connections. Required."""
+
+
 class MessageAttachment(_model_base.Model):
     """This describes to which tools a file has been attached.

@@ -6104,3 +6192,15 @@ def __init__(self, mapping: Mapping[str, Any]) -> None:

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.STATIC, **kwargs)
+
+
+class WorkspaceProperties(_model_base.Model):
+    """Workspace properties.
+
+
+    :ivar application_insights: The resource URL of the Application Insights resource attached to
+     this workspace. Required.
+    :vartype application_insights: str
+    """
+
+    application_insights: str = rest_field(name="applicationInsights")
+    """The resource URL of the Application Insights resource attached to this workspace. Required."""
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
index 1c85ce5b7385..a630946caa1a 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
 # ------------------------------------
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
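The three models above lean on `rest_field(name=...)` to translate the service's wire-format keys (`ConnectionString`, `applicationInsights`) into snake_case Python attributes, and, per their docstrings, they are MutableMapping-compatible and accept a raw JSON mapping. A minimal sketch of that behavior; the resource URL and connection string below are made-up placeholders, not values from this patch:

```python
# Sketch only: illustrative placeholder values, not taken from the patch.
from azure.ai.projects.models import GetAppInsightsResponse

raw = {
    "id": "/subscriptions/.../resourceGroups/.../providers/microsoft.insights/components/my-app-insights",
    "name": "my-app-insights",
    # PascalCase wire name; rest_field(name="ConnectionString") exposes it as `connection_string`.
    "properties": {"ConnectionString": "InstrumentationKey=00000000-0000-0000-0000-000000000000;..."},
}

response = GetAppInsightsResponse(raw)  # models accept a raw JSON mapping
assert response["properties"]["ConnectionString"] == raw["properties"]["ConnectionString"]  # dict-style access
assert response.properties.connection_string == raw["properties"]["ConnectionString"]  # attribute access
```

`GetWorkspaceResponse` follows the same pattern through the `applicationInsights` wire name on `WorkspaceProperties`.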
@@ -17,7 +18,7 @@ from ._enums import AgentStreamEvent, ConnectionType from ._models import ( - ConnectionsListSecretsResponse, + GetConnectionResponse, MessageDeltaChunk, SubmitToolOutputsAction, ThreadRun, @@ -95,7 +96,7 @@ class ConnectionProperties: :vartype token_credential: ~azure.core.credentials.TokenCredential """ - def __init__(self, *, connection: ConnectionsListSecretsResponse, token_credential: TokenCredential = None) -> None: + def __init__(self, *, connection: GetConnectionResponse, token_credential: TokenCredential = None) -> None: self.id = connection.id self.name = connection.name self.authentication_type = connection.properties.auth_type diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py index 35cf92df96bc..d6c4708ca4c0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py @@ -14,6 +14,7 @@ from ._operations import AgentsOperations # type: ignore from ._operations import ConnectionsOperations # type: ignore +from ._operations import DiagnosticsOperations # type: ignore from ._operations import EvaluationsOperations # type: ignore from ._patch import __all__ as _patch_all @@ -23,6 +24,7 @@ __all__ = [ "AgentsOperations", "ConnectionsOperations", + "DiagnosticsOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 29d195f38724..63aecae91335 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1164,7 +1164,26 @@ def build_agents_list_vector_store_file_batch_files_request( # pylint: disable= return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_connections_list_request( +def build_connections_get_workspace_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_connections_list_connections_request( # pylint: disable=name-too-long *, category: Optional[Union[str, _models.ConnectionType]] = None, include_all: Optional[bool] = None, @@ -1195,7 +1214,7 @@ def build_connections_list_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRequest: +def build_connections_get_connection_request(connection_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1219,7 +1238,9 @@ def build_connections_get_request(connection_name: str, **kwargs: Any) -> HttpRe return HttpRequest(method="GET", 
url=_url, params=_params, headers=_headers, **kwargs) -def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) -> HttpRequest: +def build_connections_get_connection_with_secrets_request( # pylint: disable=name-too-long + connection_name: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1246,6 +1267,32 @@ def build_connections_list_secrets_request(connection_name: str, **kwargs: Any) return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) +def build_diagnostics_get_app_insights_request( # pylint: disable=name-too-long + app_insights_resource_url: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/{appInsightsResourceUrl}" + path_format_arguments = { + "appInsightsResourceUrl": _SERIALIZER.url("app_insights_resource_url", app_insights_resource_url, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + def build_evaluations_get_request(id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -6314,14 +6361,78 @@ def __init__(self, *args, **kwargs): self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def _list( + def _get_workspace(self, **kwargs: Any) -> _models._models.GetWorkspaceResponse: + """Gets the properties of the specified machine learning workspace. + + :return: GetWorkspaceResponse. 
The GetWorkspaceResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetWorkspaceResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models._models.GetWorkspaceResponse] = kwargs.pop("cls", None) + + _request = build_connections_get_workspace_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize( + _models._models.GetWorkspaceResponse, response.json() # pylint: disable=protected-access + ) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def _list_connections( self, *, category: Optional[Union[str, _models.ConnectionType]] = None, include_all: Optional[bool] = None, target: Optional[str] = None, **kwargs: Any - ) -> _models._models.ConnectionsListResponse: + ) -> _models._models.ListConnectionsResponse: """List the details of all the connections (not including their credentials). :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", @@ -6332,8 +6443,8 @@ def _list( :paramtype include_all: bool :keyword target: Target of the workspace connection. Default value is None. :paramtype target: str - :return: ConnectionsListResponse. The ConnectionsListResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListResponse + :return: ListConnectionsResponse. 
The ListConnectionsResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.ListConnectionsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6347,9 +6458,9 @@ def _list( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.ListConnectionsResponse] = kwargs.pop("cls", None) - _request = build_connections_list_request( + _request = build_connections_list_connections_request( category=category, include_all=include_all, target=target, @@ -6387,7 +6498,7 @@ def _list( deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListResponse, response.json() # pylint: disable=protected-access + _models._models.ListConnectionsResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -6396,14 +6507,13 @@ def _list( return deserialized # type: ignore @distributed_trace - def _get(self, connection_name: str, **kwargs: Any) -> _models._models.ConnectionsListSecretsResponse: + def _get_connection(self, connection_name: str, **kwargs: Any) -> _models._models.GetConnectionResponse: """Get the details of a single connection, without credentials. :param connection_name: Connection Name. Required. :type connection_name: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :return: GetConnectionResponse. The GetConnectionResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetConnectionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6417,9 +6527,9 @@ def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Connectio _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetConnectionResponse] = kwargs.pop("cls", None) - _request = build_connections_get_request( + _request = build_connections_get_connection_request( connection_name=connection_name, api_version=self._config.api_version, headers=_headers, @@ -6455,7 +6565,7 @@ def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Connectio deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + _models._models.GetConnectionResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -6464,22 +6574,22 @@ def _get(self, connection_name: str, **kwargs: Any) -> _models._models.Connectio return deserialized # type: ignore @overload - def _list_secrets( + def _get_connection_with_secrets( self, connection_name: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... @overload - def _list_secrets( + def _get_connection_with_secrets( self, connection_name: str, *, ignored: str, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... 
@overload - def _list_secrets( + def _get_connection_with_secrets( self, connection_name: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: ... + ) -> _models._models.GetConnectionResponse: ... @distributed_trace - def _list_secrets( + def _get_connection_with_secrets( self, connection_name: str, body: Union[JSON, IO[bytes]] = _Unset, *, ignored: str = _Unset, **kwargs: Any - ) -> _models._models.ConnectionsListSecretsResponse: + ) -> _models._models.GetConnectionResponse: """Get the details of a single connection, including credentials (if available). :param connection_name: Connection Name. Required. @@ -6488,9 +6598,8 @@ def _list_secrets( :type body: JSON or IO[bytes] :keyword ignored: The body is ignored. TODO: Can we remove this?. Required. :paramtype ignored: str - :return: ConnectionsListSecretsResponse. The ConnectionsListSecretsResponse is compatible with - MutableMapping - :rtype: ~azure.ai.projects.models._models.ConnectionsListSecretsResponse + :return: GetConnectionResponse. The GetConnectionResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models._models.GetConnectionResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6505,7 +6614,7 @@ def _list_secrets( _params = kwargs.pop("params", {}) or {} content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models._models.ConnectionsListSecretsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetConnectionResponse] = kwargs.pop("cls", None) if body is _Unset: if ignored is _Unset: @@ -6519,7 +6628,7 @@ def _list_secrets( else: _content = json.dumps(body, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_connections_list_secrets_request( + _request = build_connections_get_connection_with_secrets_request( connection_name=connection_name, content_type=content_type, api_version=self._config.api_version, @@ -6557,7 +6666,7 @@ def _list_secrets( deserialized = response.iter_bytes() else: deserialized = _deserialize( - _models._models.ConnectionsListSecretsResponse, response.json() # pylint: disable=protected-access + _models._models.GetConnectionResponse, response.json() # pylint: disable=protected-access ) if cls: @@ -6566,6 +6675,93 @@ def _list_secrets( return deserialized # type: ignore +class DiagnosticsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.ai.projects.AIProjectClient`'s + :attr:`diagnostics` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _models.GetAppInsightsResponse: + # pylint: disable=line-too-long + """Gets the properties of the specified Application Insights resource. + + :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the + format: + ``/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resource-name}``. + Required. 
+ :type app_insights_resource_url: str + :return: GetAppInsightsResponse. The GetAppInsightsResponse is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.GetAppInsightsResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.GetAppInsightsResponse] = kwargs.pop("cls", None) + + _request = build_diagnostics_get_app_insights_request( + app_insights_resource_url=app_insights_resource_url, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str"), + "subscriptionId": self._serialize.url("self._config.subscription_id", self._config.subscription_id, "str"), + "resourceGroupName": self._serialize.url( + "self._config.resource_group_name", self._config.resource_group_name, "str" + ), + "projectName": self._serialize.url("self._config.project_name", self._config.project_name, "str"), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.GetAppInsightsResponse, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + class EvaluationsOperations: """ .. warning:: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index e166ab98f6c9..1df36b0cbac6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
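With the generated sync `DiagnosticsOperations` above (and the patched `enable()` added in the hunk that follows), the intended end-to-end flow is: resolve the project's Application Insights connection string, then hand it to whatever tracing exporter you use. A hedged sketch, under the assumption that the separate `azure-monitor-opentelemetry` package is the consumer; the patch itself does not prescribe an exporter:

```python
# Sketch, not a sample shipped in this patch.
import os

from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

with AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
) as project_client:
    # enable() returns False when no Application Insights resource is attached to the
    # project; otherwise it populates `project_client.diagnostics.connection_string`.
    if project_client.diagnostics.enable():
        # Assumption: route traces through the Azure Monitor OpenTelemetry distro.
        from azure.monitor.opentelemetry import configure_azure_monitor

        configure_azure_monitor(connection_string=project_client.diagnostics.connection_string)
    else:
        print("Application Insights is not enabled for this project.")
```

The renamed `sample_get_chat_completions_client_with_tracing.py` sample further down follows the same `enable()` pattern.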
@@ -15,8 +14,14 @@ # from zoneinfo import ZoneInfo from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated +from ._operations import DiagnosticsOperations as DiagnosticsOperationsGenerated from ..models._enums import AuthenticationType, ConnectionType -from ..models._models import ConnectionsListSecretsResponse, ConnectionsListResponse +from ..models._models import ( + GetConnectionResponse, + ListConnectionsResponse, + GetAppInsightsResponse, + GetWorkspaceResponse, +) from .._types import AgentsApiResponseFormatOption from ..models._patch import ConnectionProperties from ..models._enums import FilePurpose @@ -43,7 +48,7 @@ class InferenceOperations: def __init__(self, outer_instance): - self.outer_instance = outer_instance + self._outer_instance = outer_instance @distributed_trace def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": @@ -56,7 +61,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( + connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: @@ -108,7 +113,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( + connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) if not connection: @@ -150,16 +155,22 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace - def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI": + def get_azure_openai_client(self, *, api_version: str | None = None, **kwargs) -> "AzureOpenAI": """Get an authenticated AzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. + See "Data plane - Inference" row in the table at + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs. If this keyword + is not specified, you must set the environment variable `OPENAI_API_VERSION` instead. + :paramtype api_version: str :return: An authenticated AzureOpenAI client :rtype: ~openai.AzureOpenAI :raises ~azure.core.exceptions.HttpResponseError: """ + kwargs.setdefault("merge_span", True) - connection = self.outer_instance.connections.get_default( + connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs ) if not connection: @@ -170,16 +181,12 @@ def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI": except ModuleNotFoundError as _: raise ModuleNotFoundError("OpenAI SDK is not installed. 
Please install it using 'pip install openai'") - # Pick latest GA version from the "Data plane - Inference" row in the table - # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - AZURE_OPENAI_API_VERSION = "2024-06-01" - if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using API key authentication" ) client = AzureOpenAI( - api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=AZURE_OPENAI_API_VERSION + api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) elif connection.authentication_type == AuthenticationType.AAD: logger.debug( @@ -197,7 +204,7 @@ def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI": connection.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=connection.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION, + api_version=api_version, ) elif connection.authentication_type == AuthenticationType.SAS: logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") @@ -206,7 +213,7 @@ def get_azure_openai_client(self, **kwargs) -> "AzureOpenAI": connection.token_credential, "https://cognitiveservices.azure.com/.default" ), azure_endpoint=connection.endpoint_url, - api_version=AZURE_OPENAI_API_VERSION, + api_version=api_version, ) else: raise ValueError("Unknown authentication type") @@ -264,7 +271,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: if not connection_name: raise ValueError("Connection name cannot be empty") if with_credentials: - connection: ConnectionsListSecretsResponse = self._list_secrets( + connection: GetConnectionResponse = self._get_connection_with_secrets( connection_name=connection_name, ignored="ignore", **kwargs ) if connection.properties.auth_type == AuthenticationType.AAD: @@ -284,7 +291,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: return ConnectionProperties(connection=connection) else: - return ConnectionProperties(connection=self._get(connection_name=connection_name, **kwargs)) + return ConnectionProperties(connection=self._get_connection(connection_name=connection_name, **kwargs)) @distributed_trace def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) -> Iterable[ConnectionProperties]: @@ -298,7 +305,9 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connections_list: ConnectionsListResponse = self._list(include_all=True, category=connection_type, **kwargs) + connections_list: ListConnectionsResponse = self._list_connections( + include_all=True, category=connection_type, **kwargs + ) # Iterate to create the simplified result property connection_properties_list: List[ConnectionProperties] = [] @@ -308,6 +317,46 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) return connection_properties_list +class DiagnosticsOperations(DiagnosticsOperationsGenerated): + + connection_string: Optional[str] = None + """ Application Insights connection string. Call `enable()` to populate this property. 
""" + + def __init__(self, *args, **kwargs): + self._outer_instance = kwargs.pop("outer_instance") + super().__init__(*args, **kwargs) + + @distributed_trace + def enable(self, **kwargs) -> bool: + """Enable Application Insights tracing. + This method makes service calls to get the properties of the Applications Insights resource + connected to the Azure AI Studio Project. If Application Insights was not enabled for this project, + this method will return False. Otherwise, it will return True. In this case the Application Insights + connection string can be accessed via the `.diagnostics.connection_string` property. + + :return: True if Application Insights tracing was enabled. False otherwise. + :rtype: bool + """ + if not self.connection_string: + # Get the AI Studio Project properties + get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace() + + # No Application Insights resource was enabled for this Project + if not get_workspace_response.properties.application_insights: + return False + + app_insights_respose: GetAppInsightsResponse = self.get_app_insights( + app_insights_resource_url=get_workspace_response.properties.application_insights + ) + + if not app_insights_respose.properties.connection_string: + raise ValueError("Application Insights resource does not have a connection string") + + self.connection_string = app_insights_respose.properties.connection_string + + return True + + class AgentsOperations(AgentsOperationsGenerated): @overload def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: @@ -1969,6 +2018,7 @@ def create_vector_store_file_batch_and_poll( __all__: List[str] = [ "AgentsOperations", "ConnectionsOperations", + "DiagnosticsOperations", "InferenceOperations", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env new file mode 100644 index 000000000000..648c37a65c54 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env @@ -0,0 +1,19 @@ +# +# Environment variables required for running tests +# + +# To run Connection tests: +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING= + +# To run Inference tests: +AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} +AZURE_AI_PROJECTS_INFERENCE_TESTS_MODEL_DEPLOYMENT_NAME= + +# To run Diagnostics tests: +AZURE_AI_PROJECTS_DIAGNOSTICS_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} + +# To run Agents tests: + +# To run Evaluation tests: + + diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 475873498017..0aaa0458dd0f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -24,7 +24,13 @@ from typing import Any from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models._models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun +from azure.ai.projects.models._models import ( + MessageDeltaChunk, + MessageDeltaTextContent, + RunStep, + ThreadMessage, + ThreadRun, +) from 
azure.ai.projects.models._patch import AsyncAgentEventHandler from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 3b001d0c920d..0f5dbc6e9efa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -40,7 +40,9 @@ async def main(): async with project_client: # upload a file and wait for it to be processed - file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) + file = await project_client.agents.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.AGENTS + ) print(f"Uploaded file, file ID: {file.id}") # create a vector store with no file and wait for it to be processed @@ -80,17 +82,21 @@ async def main(): file_search_tool.remove_vector_store(vector_store.id) print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - await project_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources) + await project_client.agents.update_agent( + assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) print(f"Updated agent, agent ID: {agent.id}") thread = await project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = await project_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?") + message = await project_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ ) print(f"Created message, message ID: {message.id}") run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") + print(f"Created run, run ID: {run.id}") await project_client.agents.delete_file(file.id) print("Deleted file") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 4686f3f9fc29..d7f19653554c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -41,7 +41,9 @@ async def main(): # upload a file and wait for it to be processed async with project_client: - file = await project_client.agents.upload_file_and_poll(file_path="../product_info_1.md", purpose=FilePurpose.AGENTS) + file = await project_client.agents.upload_file_and_poll( + file_path="../product_info_1.md", purpose=FilePurpose.AGENTS + ) # Create agent with file search tool agent = await project_client.agents.create_agent( diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index 30776b1ebac6..16d65bec0e2e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -40,7 +40,9 @@ openai_file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {openai_file.id}") - openai_vectorstore = project_client.agents.create_vector_store_and_poll(file_ids=[openai_file.id], name="my_vectorstore") + openai_vectorstore = project_client.agents.create_vector_store_and_poll( + file_ids=[openai_file.id], name="my_vectorstore" + ) print(f"Created vector store, vector store ID: {openai_vectorstore.id}") # Create file search tool with resources diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 41ab2ba587f5..ed159e206a48 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -23,7 +23,14 @@ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun +from azure.ai.projects.models import ( + Agent, + MessageDeltaChunk, + MessageDeltaTextContent, + RunStep, + ThreadMessage, + ThreadRun, +) from azure.ai.projects.models import AgentEventHandler from azure.ai.projects.operations import AgentsOperations from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index c5c0f5ec2072..08c717f089f0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -78,17 +78,21 @@ file_search_tool.remove_vector_store(vector_store.id) 
print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - await project_client.agents.update_agent(assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources) + await project_client.agents.update_agent( + assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + ) print(f"Updated agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) print(f"Created message, message ID: {message.id}") run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) - print(f"Created run, run ID: {run.id}") + print(f"Created run, run ID: {run.id}") project_client.agents.delete_file(file.id) print("Deleted file") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index 5d351d2f7810..b34f469cfd2f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -41,12 +41,13 @@ # Upload data for evaluation # Service side fix needed to make this work # data_id = project_client.upload_file("./evaluate_test_data.jsonl") -data_id = "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3" +data_id = ( + "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3" +) default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) - # Create an evaluation evaluation = Evaluation( display_name="Remote Evaluation", @@ -59,14 +60,14 @@ "relevance": EvaluatorConfiguration( id=RelevanceEvaluator.evaluator_id, init_params={ - "model_config": default_connection.to_evaluator_model_config(deployment_name="GPT-4-Prod", api_version="2024-08-01-preview") + "model_config": default_connection.to_evaluator_model_config( + deployment_name="GPT-4-Prod", api_version="2024-08-01-preview" + ) }, ), "hate_unfairness": EvaluatorConfiguration( id=HateUnfairnessEvaluator.evaluator_id, - init_params={ - "azure_ai_project": project_client.scope - }, + init_params={"azure_ai_project": project_client.scope}, ), }, # This is needed as a workaround until environment gets published to registry diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index 5f24696dcb19..e4ad81dac368 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -1,8 +1,18 @@ from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import ApplicationInsightsConfiguration, EvaluatorConfiguration, SamplingStrategy, EvaluationSchedule, CronTrigger, RecurrenceTrigger, Frequency, RecurrenceSchedule - +from azure.ai.projects.models import ( + ApplicationInsightsConfiguration, + EvaluatorConfiguration, + SamplingStrategy, + EvaluationSchedule, + CronTrigger, + 
RecurrenceTrigger, + Frequency, + RecurrenceSchedule, +) + + def main(): # Project Configuration Canary Subscription = "72c03bf3-4e69-41af-9532-dfcdc3eefef4" @@ -21,31 +31,28 @@ def main(): app_insights_config = ApplicationInsightsConfiguration( resource_id="/subscriptions/72c03bf3-4e69-41af-9532-dfcdc3eefef4/resourceGroups/apeddau-rg-centraluseuap/providers/Microsoft.insights/components/apeddauwscentr0026977484", - query="traces | where message contains \"\"", - service_name="sample_service_name" + query='traces | where message contains ""', + service_name="sample_service_name", ) - + f1_evaluator_config = EvaluatorConfiguration( id="azureml://registries/model-evaluation-dev-01/models/F1ScoreEval/versions/1", - init_params={ - "column_mapping": { - "response": "${data.message}", - "ground_truth": "${data.itemType}" - } - } + init_params={"column_mapping": {"response": "${data.message}", "ground_truth": "${data.itemType}"}}, ) - + recurrence_trigger = RecurrenceTrigger(frequency="daily", interval=1) evaluators = { "f1_score": f1_evaluator_config, } - + sampling_strategy = SamplingStrategy(rate=0.2) name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104" description = "Testing Online eval command job in CANARY environment" tags = {"tag1": "value1", "tag2": "value2"} - properties = {"Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1"} - + properties = { + "Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1" + } + evaluation_schedule = EvaluationSchedule( data=app_insights_config, evaluators=evaluators, @@ -53,7 +60,7 @@ def main(): sampling_strategy=sampling_strategy, description=description, tags=tags, - properties=properties + properties=properties, ) evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule) @@ -67,9 +74,10 @@ def main(): # Sample for list evaluation schedules for evaluation_schedule in ai_client.evaluations.list_schedule(): print(evaluation_schedule) - + # Sample for delete an evaluation schedule with name ai_client.evaluations.delete_schedule(name) - + + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py index b4a7e6939385..e2dc15e93c59 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py @@ -30,7 +30,7 @@ ) as project_client: # Get an authenticated OpenAI client for your default Azure OpenAI connection: - with project_client.inference.get_azure_openai_client() as client: + with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( model="gpt-4-0613", diff --git a/sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py similarity index 67% rename from sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py rename to sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py index 03e1da53a60a..ca86b7a126de 100644 --- a/sdk/ai/azure-ai-project/samples/inference/sample_get_chat_completions_client.py +++ 
b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py
@@ -4,24 +4,24 @@
 # ------------------------------------

 """
-FILE: sample_get_chat_completions_client.py
+FILE: sample_get_chat_completions_client_with_tracing.py

 DESCRIPTION:
     Given an AIProjectClient, this sample demonstrates how to get an authenticated
     ChatCompletionsClient from the azure.ai.inference package.

 USAGE:
-    python sample_get_chat_completions_client.py
+    python sample_get_chat_completions_client_with_tracing.py

     Before running the sample:

-    pip install azure.ai.project azure-identity
+    pip install azure.ai.projects azure-identity

     Set this environment variable with your own value:
     PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
 """
 import os
-from azure.ai.project import AIProjectClient
+from azure.ai.projects import AIProjectClient
 from azure.ai.inference.models import UserMessage
 from azure.identity import DefaultAzureCredential

@@ -30,6 +30,14 @@
     conn_str=os.environ["PROJECT_CONNECTION_STRING"],
 ) as project_client:

+    if not project_client.diagnostics.enable():
+        print("Application Insights was not enabled for this project.")
+        print("Enable it via the 'Tracing' tab under 'Tools', in your AI Studio project page.")
+        exit()
+
+    print(f"Application Insights connection string = {project_client.diagnostics.connection_string}")
+
     # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection:
     with project_client.inference.get_chat_completions_client() as client:

diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md
index fe61558eb116..cddf5f0e6ff2 100644
--- a/sdk/ai/azure-ai-projects/tests/README.md
+++ b/sdk/ai/azure-ai-projects/tests/README.md
@@ -30,25 +30,10 @@ az login

 ## Setup for running tests in the `agents` folder

-```bash
-set PROJECT_CONNECTION_STRING_AGENTS_TESTS=
-```
-
 ## Setup for running tests in the `evaluations` folder

 ## Setup for running tests in the `connections` and `inference` folders

-You need an Azure AI Project that has the following:
-
-TODO
-
-Copy the `Project connection string` from the Azure AI Studio and set the following environment variable:
-
-```bash
-set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING=
-set AZURE_AI_PROJECTS_CONNECTIONS_TEST_MODEL_DEPLOYMENT_NAME=
-```
-
 ## Configure test proxy

 Configure the test proxy to run live service tests without recordings:
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py b/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py
index d164d514f443..ed1de7ab7ed9 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_deserialization.py
@@ -9,6 +9,7 @@
 from azure.ai.projects.models._models import ThreadRun, RunStep, ThreadMessage
 from azure.ai.projects.models._patch import _safe_instantiate, _filter_parameters

+
 class TestDeserialization:
     """Tests for deserialization of sse responses."""

diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py
index d944cdf86007..95b7f817c771 100644
--- a/sdk/ai/azure-ai-projects/tests/conftest.py
+++ b/sdk/ai/azure-ai-projects/tests/conftest.py
@@ -4,8 +4,11 @@
 # ------------------------------------
 import pytest
-from devtools_testutils import test_proxy, remove_batch_sanitizers
+from devtools_testutils import remove_batch_sanitizers
+from dotenv import load_dotenv, 
find_dotenv +if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): + print("Failed to apply environment variables for azure-ai-projects tests.") # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index b9168414a6ae..b37415126e4f 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -5,18 +5,18 @@ import sys import logging import functools +from dotenv import load_dotenv from azure.ai.projects import AIProjectClient -from azure.identity import DefaultAzureCredential from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader """ Set these environment variables before running the test: -set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING= +set AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING= """ servicePreparerConnectionsTests = functools.partial( EnvironmentVariableLoader, - "azure_ai_projects_connections_test", - azure_ai_projects_connections_test_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + "azure_ai_projects_connections_tests", + azure_ai_projects_connections_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", ) @@ -33,14 +33,14 @@ handler = logging.StreamHandler(stream=sys.stdout) logger.addHandler(handler) + class ConnectionsTestBase(AzureRecordedTestCase): def get_sync_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string") + conn_str = kwargs.pop("azure_ai_projects_connections_tests_project_connection_string") project_client = AIProjectClient.from_connection_string( credential=self.get_credential(AIProjectClient, is_async=False), conn_str=conn_str, - logging_enable=LOGGING_ENABLED + logging_enable=LOGGING_ENABLED, ) return project_client - diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 09b99d2f9139..d105de6c9c2a 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -6,6 +6,7 @@ from devtools_testutils import recorded_by_proxy from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests + # The test class name needs to start with "Test" to get collected by pytest class TestConnections(ConnectionsTestBase): diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index a6dfb5843f99..599ee2a8a161 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -8,6 +8,7 @@ from azure.core.exceptions import HttpResponseError from connection_test_base import ConnectionsTestBase + class FakeTokenCredential(TokenCredential): def get_token(self, *scopes, **kwargs): # Create a fake token with an expiration time @@ -57,7 +58,7 @@ def test_sas_token_credential_class_mocked(self, **kwargs): sas_token=sas_token, 
credential=FakeTokenCredential(), subscription_id="fake_subscription_id", - resource_group_name="fake_resouce_group", + resource_group_name="fake_resource_group", project_name="fake_project_name", connection_name="fake_connection_name", ) @@ -95,4 +96,3 @@ def test_sas_token_credential_class_real(self, **kwargs): print(f"[TEST] Actual expiration date: {sas_token_credential._expires_on}") assert sas_token_credential._expires_on == expiration_datatime_utc - diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py new file mode 100644 index 000000000000..ce70bc4f52de --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py @@ -0,0 +1,60 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import re +import sys +import logging +import functools +from azure.ai.projects import AIProjectClient +from azure.ai.projects.aio import AIProjectClient as AIProjectClientAsync +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader + +""" +Set these environment variables before running the test: +set AZURE_AI_PROJECTS_DIAGNOSTICS_TEST_PROJECT_CONNECTION_STRING= +""" +servicePreparerDiagnosticsTests = functools.partial( + EnvironmentVariableLoader, + "azure_ai_projects_diagnostics_test", + azure_ai_projects_diagnostics_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", +) + +# Set to True to enable SDK logging +LOGGING_ENABLED = True + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + + +class DiagnosticsTestBase(AzureRecordedTestCase): + + # Regular expression describing the pattern of an Application Insights connection string. + REGEX_APPINSIGHTS_CONNECTION_STRING = re.compile( + r"^InstrumentationKey=[0-9a-fA-F-]{36};IngestionEndpoint=https://.+.applicationinsights.azure.com/;LiveEndpoint=https://.+.monitor.azure.com/;ApplicationId=[0-9a-fA-F-]{36}$" + ) + + def get_sync_client(self, **kwargs) -> AIProjectClient: + conn_str = kwargs.pop("azure_ai_projects_diagnostics_tests_project_connection_string") + project_client = AIProjectClient.from_connection_string( + credential=self.get_credential(AIProjectClient, is_async=False), + conn_str=conn_str, + logging_enable=LOGGING_ENABLED, + ) + return project_client + + def get_async_client(self, **kwargs) -> AIProjectClient: + conn_str = kwargs.pop("azure_ai_projects_diagnostics_tests_project_connection_string") + project_client = AIProjectClientAsync.from_connection_string( + credential=self.get_credential(AIProjectClientAsync, is_async=False), + conn_str=conn_str, + logging_enable=LOGGING_ENABLED, + ) + return project_client \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py new file mode 100644 index 000000000000..1d71fa8a1374 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py @@ -0,0 +1,21 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
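Editor's note: the connection-string pattern defined in `DiagnosticsTestBase` above is easiest to sanity-check in isolation. A minimal standalone sketch, using a fabricated connection string of the right shape (real values come from the project's Application Insights resource):

```python
import re

# Same pattern as DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING above.
REGEX_APPINSIGHTS_CONNECTION_STRING = re.compile(
    r"^InstrumentationKey=[0-9a-fA-F-]{36}"
    r";IngestionEndpoint=https://.+.applicationinsights.azure.com/"
    r";LiveEndpoint=https://.+.monitor.azure.com/"
    r";ApplicationId=[0-9a-fA-F-]{36}$"
)

# Fabricated connection string with the expected shape (all-zero GUIDs,
# made-up region hosts); only the structure matters for the regex.
sample = (
    "InstrumentationKey=00000000-0000-0000-0000-000000000000;"
    "IngestionEndpoint=https://eastus-8.in.applicationinsights.azure.com/;"
    "LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/;"
    "ApplicationId=00000000-0000-0000-0000-000000000000"
)

assert REGEX_APPINSIGHTS_CONNECTION_STRING.match(sample) is not None
```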
+# ------------------------------------ + +from devtools_testutils import recorded_by_proxy +from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests + + +# The test class name needs to start with "Test" to get collected by pytest +class TestDiagnostics(DiagnosticsTestBase): + + @servicePreparerDiagnosticsTests() + @recorded_by_proxy + def test_diagnostics(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + assert project_client.diagnostics.connection_string == None + assert project_client.diagnostics.enable() == True + assert project_client.diagnostics.connection_string is not None + assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(project_client.diagnostics.connection_string)) + assert project_client.diagnostics.enable() == True \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py new file mode 100644 index 000000000000..c68854a76926 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py @@ -0,0 +1,21 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from devtools_testutils.aio import recorded_by_proxy_async +from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests + + +# The test class name needs to start with "Test" to get collected by pytest +class TestDiagnosticsAsync(DiagnosticsTestBase): + + @servicePreparerDiagnosticsTests() + @recorded_by_proxy_async + async def test_diagnostics_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + assert project_client.diagnostics.connection_string == None + assert await project_client.diagnostics.enable() == True + assert project_client.diagnostics.connection_string is not None + assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(project_client.diagnostics.connection_string)) + assert await project_client.diagnostics.enable() == True diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index efaf63eddfc2..e9f99199fedd 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -11,14 +11,14 @@ """ Set these environment variables before running the test: -set AZURE_AI_PROJECTS_CONNECTIONS_TEST_PROJECT_CONNECTION_STRING= -set AZURE_AI_PROJECTS_CONNECTIONS_TEST_MODEL_DEPLOYMENT_NAME= +set AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING= +set AZURE_AI_PROJECTS_INFERENCE_TESTS_MODEL_DEPLOYMENT_NAME= """ servicePreparerInferenceTests = functools.partial( EnvironmentVariableLoader, - "azure_ai_projects_connections_test", - azure_ai_projects_connections_test_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", - azure_ai_projects_connections_test_model_deployment_name="model-deployment-name", + "azure_ai_projects_inference_tests", + azure_ai_projects_inference_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + azure_ai_projects_inference_tests_model_deployment_name="model-deployment-name", ) @@ -35,22 +35,23 @@ handler = logging.StreamHandler(stream=sys.stdout) logger.addHandler(handler) + class 
InferenceTestBase(AzureRecordedTestCase): def get_sync_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string") + conn_str = kwargs.pop("azure_ai_projects_inference_tests_project_connection_string") project_client = AIProjectClient.from_connection_string( credential=self.get_credential(AIProjectClient, is_async=False), conn_str=conn_str, - logging_enable=LOGGING_ENABLED + logging_enable=LOGGING_ENABLED, ) return project_client def get_async_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("azure_ai_projects_connections_test_project_connection_string") + conn_str = kwargs.pop("azure_ai_projects_inference_tests_project_connection_string") project_client = AIProjectClientAsync.from_connection_string( credential=self.get_credential(AIProjectClientAsync, is_async=False), conn_str=conn_str, - logging_enable=LOGGING_ENABLED + logging_enable=LOGGING_ENABLED, ) return project_client diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 5afdfe938e7e..6b1ed698b653 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -7,15 +7,17 @@ from inference_test_base import InferenceTestBase, servicePreparerInferenceTests from azure.ai.inference.models import SystemMessage, UserMessage + # The test class name needs to start with "Test" to get collected by pytest class TestInference(InferenceTestBase): @servicePreparerInferenceTests() @recorded_by_proxy def test_inference_get_azure_openai_client(self, **kwargs): - model = kwargs.pop("azure_ai_projects_connections_test_model_deployment_name") + model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: - with project_client.inference.get_azure_openai_client() as azure_openai_client: + # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + with project_client.inference.get_azure_openai_client(api_version="2024-10-01-preview") as azure_openai_client: response = azure_openai_client.chat.completions.create( messages=[ { @@ -26,7 +28,7 @@ def test_inference_get_azure_openai_client(self, **kwargs): model=model, ) pprint.pprint(response) - contains=["5280", "5,280"] + contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) @servicePreparerInferenceTests() @@ -41,7 +43,7 @@ def test_inference_get_chat_completions_client(self, **kwargs): ] ) pprint.pprint(response) - contains=["5280", "5,280"] + contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) @servicePreparerInferenceTests() diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index d3ebbb792221..3273fe9849d7 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -7,15 +7,17 @@ from inference_test_base import InferenceTestBase, servicePreparerInferenceTests from azure.ai.inference.models import SystemMessage, UserMessage + # The test class name needs to start with "Test" to get collected by pytest class TestInferenceAsync(InferenceTestBase): - + @servicePreparerInferenceTests() @recorded_by_proxy_async async def 
test_inference_get_azure_openai_client_async(self, **kwargs): - model = kwargs.pop("azure_ai_projects_connections_test_model_deployment_name") + model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") async with self.get_async_client(**kwargs) as project_client: - async with await project_client.inference.get_azure_openai_client() as azure_openai_client: + # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + async with await project_client.inference.get_azure_openai_client(api_version="2024-10-01-preview") as azure_openai_client: response = await azure_openai_client.chat.completions.create( messages=[ { @@ -26,10 +28,9 @@ async def test_inference_get_azure_openai_client_async(self, **kwargs): model=model, ) pprint.pprint(response) - contains=["5280", "5,280"] + contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) - @servicePreparerInferenceTests() @recorded_by_proxy_async async def test_inference_get_chat_completions_client_async(self, **kwargs): @@ -42,7 +43,7 @@ async def test_inference_get_chat_completions_client_async(self, **kwargs): ] ) pprint.pprint(response) - contains=["5280", "5,280"] + contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) @servicePreparerInferenceTests() diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 24bed3aa2376..b080c3792844 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 6c50f709580b8ae61f69f6a7ccc243b5e01279d1 +commit: ff1c3a7b447adbea65e8c6b55914d70f5dcca40a repo: Azure/azure-rest-api-specs additionalDirectories: From 557f4061e2464ecb43ba4e944ec7bac0ed37e53f Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:34:07 -0700 Subject: [PATCH 054/138] Users/singankit/evaluation async samples (#38104) * Adding async sample for evaluation * Update sample_evaluations_async.py --- .../azure/ai/projects/aio/_patch.py | 50 +++++++++- .../async_samples/evaluate_test_data.jsonl | 3 + .../async_samples/sample_evaluations_async.py | 92 +++++++++++++++++++ .../samples/evaluations/sample_evaluations.py | 32 +++---- 4 files changed, 159 insertions(+), 18 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl create mode 100644 sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 5633cd0ddc22..781d25c372d8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -6,7 +6,10 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from typing import List, Any +import uuid +from os import PathLike +from pathlib import Path +from typing import List, Any, Union, Dict from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from typing_extensions import Self @@ -155,7 +158,7 @@ def __init__( project_name=project_name, credential=credential, api_version="2024-07-01-preview", # TODO: Update me - credential_scopes=["https://management.azure.com"], # TODO: Update once service changes are ready 
+ credential_scopes=["https://ml.azure.com"], # TODO: Update once service changes are ready **kwargs3, ) _policies3 = kwargs3.pop("policies", None) @@ -226,6 +229,49 @@ def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential project_name = parts[3] return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) + def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: + """Upload a file to the Azure AI Studio project. + This method required *azure-ai-ml* to be installed. + + :param file_path: The path to the file to upload. + :type file_path: Union[str, Path, PathLike] + :return: The asset id of uploaded file. + :rtype: str + """ + try: + from azure.ai.ml import MLClient + from azure.ai.ml.entities import Data + from azure.ai.ml.constants import AssetTypes + except ImportError: + raise ImportError( + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") + + data = Data( + path=file_path, + type=AssetTypes.URI_FILE, + name=str(uuid.uuid4()), # generating random name + is_anonymous=True, + version="1", + ) + + ml_client = MLClient( + self._config3.credential, + self._config3.subscription_id, + self._config3.resource_group_name, + self._config3.project_name, + ) + + data_asset = ml_client.data.create_or_update(data) + + return data_asset.id + + @property + def scope(self) -> Dict[str, str]: + return { + "subscription_id": self._config3.subscription_id, + "resource_group_name": self._config3.resource_group_name, + "project_name": self._config3.project_name, + } __all__: List[str] = [ "AIProjectClient", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl new file mode 100644 index 000000000000..0396a22c2db6 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl @@ -0,0 +1,3 @@ +{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."} +{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. 
Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} +{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."} diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py new file mode 100644 index 000000000000..e27e1a54c528 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -0,0 +1,92 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_basics_async.py + +DESCRIPTION: + This sample demonstrates how to use evaluation operations from + the Azure Evaluation service using a asynchronous client. + +USAGE: + python sample_evaluation_async.py + + Before running the sample: + + pip install azure.ai.projects azure-identity aiohttp + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
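Editor's note: the `upload_file` and `scope` additions to `aio/_patch.py` above are what the evaluation samples lean on. A short sketch of how they combine on the sync client (which the sync sample uses the same way), assuming `azure-ai-ml` is installed and using a placeholder data file path:

```python
import os
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

# upload_file wraps azure-ai-ml's MLClient.data.create_or_update and raises
# ImportError if that package is missing; the path below is a placeholder.
data_id = project_client.upload_file("./evaluate_test_data.jsonl")
print("Uploaded data asset id:", data_id)

# scope returns the subscription / resource group / project triple, which the
# safety evaluators take as their azure_ai_project init parameter.
print(project_client.scope)
```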
+""" +import asyncio +import time +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType +from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, ViolenceEvaluator + + +async def main(): + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) + + # Upload data for evaluation + data_id = project_client.upload_file("./evaluate_test_data.jsonl") + + default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) + + deployment_name = "<>" + api_version = "<>" + + # Create an evaluation + evaluation = Evaluation( + display_name="Remote Evaluation", + description="Evaluation of dataset", + data=Dataset(id=data_id), + evaluators={ + "f1_score": EvaluatorConfiguration( + # id=F1ScoreEvaluator.id, + id="azureml://registries/azureml-staging/models/F1Score-Evaluator/versions/3", + ), + "relevance": EvaluatorConfiguration( + # id=RelevanceEvaluator.id, + id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3", + init_params={ + "model_config": default_connection.to_evaluator_model_config(deployment_name=deployment_name, + api_version=api_version) + }, + ), + "violence": EvaluatorConfiguration( + # id=ViolenceEvaluator.id, + id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3", + init_params={ + "azure_ai_project": project_client.scope + }, + ), + }, + ) + + async with project_client: + # Create evaluation + evaluation_response = await project_client.evaluations.create(evaluation) + + # Get evaluation + get_evaluation_response = await project_client.evaluations.get(evaluation_response.id) + + print("----------------------------------------------------------------") + print("Created evaluation, evaluation ID: ", get_evaluation_response.id) + print("Evaluation status: ", get_evaluation_response.status) + print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) + print("----------------------------------------------------------------") + + + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index b34f469cfd2f..cbd8b213f002 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -39,14 +39,12 @@ ) # Upload data for evaluation -# Service side fix needed to make this work -# data_id = project_client.upload_file("./evaluate_test_data.jsonl") -data_id = ( - "azureml://locations/eastus2/workspaces/faa79f3d-91b3-4ed5-afdc-4cc0fe13fb85/data/remote-evals-data/versions/3" -) +data_id = project_client.upload_file("./evaluate_test_data.jsonl") default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) +deployment_name = "<>" +api_version = "<>" # Create an evaluation evaluation = Evaluation( @@ -55,26 +53,28 @@ data=Dataset(id=data_id), evaluators={ "f1_score": EvaluatorConfiguration( - id=F1ScoreEvaluator.evaluator_id, + # id=F1ScoreEvaluator.id, + id="azureml://registries/azureml-staging/models/F1Score-Evaluator/versions/3", ), "relevance": EvaluatorConfiguration( - id=RelevanceEvaluator.evaluator_id, + # 
id=RelevanceEvaluator.id, + id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3", init_params={ - "model_config": default_connection.to_evaluator_model_config( - deployment_name="GPT-4-Prod", api_version="2024-08-01-preview" - ) + "model_config": default_connection.to_evaluator_model_config(deployment_name=deployment_name, + api_version=api_version) }, ), - "hate_unfairness": EvaluatorConfiguration( - id=HateUnfairnessEvaluator.evaluator_id, - init_params={"azure_ai_project": project_client.scope}, + "violence": EvaluatorConfiguration( + # id=ViolenceEvaluator.id, + id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3", + init_params={ + "azure_ai_project": project_client.scope + }, ), }, - # This is needed as a workaround until environment gets published to registry - properties={"Environment": "azureml://registries/jamahaja-evals-registry/environments/eval-remote-env/versions/6"}, ) -# Create evaluation + evaluation_response = project_client.evaluations.create( evaluation=evaluation, ) From cdc3ffb5e2a5e6043905207cae617f5c053607ee Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:49:28 -0700 Subject: [PATCH 055/138] Fix sphinx docstring and cSpell (#38125) * Fix sphinx * Fix spelling * Fix * Fix II * Generate new code * Fix test * Fix --- .../azure/ai/projects/_patch.py | 1 + .../ai/projects/aio/operations/_operations.py | 14 +++++++------- .../azure/ai/projects/aio/operations/_patch.py | 2 +- .../azure/ai/projects/models/_models.py | 18 +++++++++--------- .../azure/ai/projects/models/_patch.py | 5 +++-- .../ai/projects/operations/_operations.py | 14 +++++++------- .../azure/ai/projects/operations/_patch.py | 2 +- ...nts_vector_store_batch_file_search_async.py | 2 +- ...le_agents_vector_store_batch_file_search.py | 2 +- .../evaluations/evaluate_test_data.jsonl | 2 +- ...get_chat_completions_client_with_tracing.py | 1 - sdk/ai/azure-ai-projects/tests/README.md | 7 +++++++ .../tests/agents/test_agents_client.py | 8 +++++--- sdk/ai/azure-ai-projects/tests/conftest.py | 1 + .../connections/test_connections_unit_tests.py | 1 + .../tests/diagnostics/diagnostics_test_base.py | 2 +- .../tests/diagnostics/test_diagnostics.py | 8 ++++++-- .../diagnostics/test_diagnostics_async.py | 6 +++++- .../tests/inference/test_inference.py | 4 +++- .../tests/inference/test_inference_async.py | 4 +++- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 21 files changed, 65 insertions(+), 41 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 3639241d9dae..9be1af9b2828 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -150,6 +150,7 @@ def __init__( self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations + # cSpell:disable-next-line _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long self._config3 = AIProjectClientConfiguration( endpoint=endpoint, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 7b0a6dd2a28a..811234a2db50 100644 --- 
a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -1323,10 +1323,10 @@ async def create_message( * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. + cases to represent user-generated messages. * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. + messages from the agent into the + conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.projects.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -1388,10 +1388,10 @@ async def create_message( * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. + cases to represent user-generated messages. * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. + messages from the agent into the + conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.projects.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -5305,7 +5305,7 @@ async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the format: - ``/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resource-name}``. + '/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resourcename}'. Required. :type app_insights_resource_url: str :return: GetAppInsightsResponse. The GetAppInsightsResponse is compatible with MutableMapping diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index e046109ac356..67a488e8b391 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -289,7 +289,7 @@ async def list( """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. - If not provided, all connections are listed. + If not provided, all connections are listed. 
:type connection_type: ~azure.ai.projects.models._models.ConnectionType :return: A list of connection properties :rtype: Iterable[~azure.ai.projects.models._models.ConnectionProperties] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 35470d11723c..28d27e0e71f6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -4987,10 +4987,10 @@ class ThreadMessageOptions(_model_base.Model): * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. + cases to represent user-generated messages. * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: "user" and "assistant". + messages from the agent into the + conversation. Required. Known values are: "user" and "assistant". :vartype role: str or ~azure.ai.projects.models.MessageRole :ivar content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -5009,11 +5009,11 @@ class ThreadMessageOptions(_model_base.Model): """The role of the entity that is creating the message. Allowed values include: - * ``user``\ : Indicates the message is sent by an actual user and should be used in most cases - to represent user-generated messages. + * ``user``\ : Indicates the message is sent by an actual user and should be used in most + cases to represent user-generated messages. * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Required. Known values are: \"user\" and \"assistant\".""" + messages from the agent into the + conversation. Required. Known values are: \"user\" and \"assistant\".""" content: str = rest_field() """The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -6095,7 +6095,7 @@ class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): :vartype max_chunk_size_tokens: int :ivar chunk_overlap_tokens: The number of tokens that overlap between chunks. The default value is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. *. Required. + Note that the overlap must not exceed half of max_chunk_size_tokens. Required. :vartype chunk_overlap_tokens: int """ @@ -6104,7 +6104,7 @@ class VectorStoreStaticChunkingStrategyOptions(_model_base.Model): and the maximum value is 4096. Required.""" chunk_overlap_tokens: int = rest_field() """The number of tokens that overlap between chunks. The default value is 400. - Note that the overlap must not exceed half of max_chunk_size_tokens. *. Required.""" + Note that the overlap must not exceed half of max_chunk_size_tokens. Required.""" @overload def __init__( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index a630946caa1a..75bdbee3f0c7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
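Editor's note: the corrected `VectorStoreStaticChunkingStrategyOptions` docstring encodes an invariant worth spelling out: the chunk overlap (default 400) must not exceed half of `max_chunk_size_tokens`, which is capped at 4096. An illustrative validator, not part of the generated models; the 100-token minimum is assumed from the same docstring:

```python
def validate_static_chunking(max_chunk_size_tokens: int, chunk_overlap_tokens: int = 400) -> None:
    """Check the documented bounds for static chunking options."""
    if not (100 <= max_chunk_size_tokens <= 4096):
        raise ValueError("max_chunk_size_tokens must be between 100 and 4096")
    if chunk_overlap_tokens > max_chunk_size_tokens // 2:
        raise ValueError("chunk_overlap_tokens must not exceed half of max_chunk_size_tokens")

validate_static_chunking(800)         # OK: default overlap 400 <= 800 // 2
validate_static_chunking(4096, 2048)  # OK: exactly at the boundary
```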
@@ -284,8 +285,8 @@ def __init__(self, functions: Set[Callable[..., Any]]): self._functions = self._create_function_dict(functions) self._definitions = self._build_function_definitions(self._functions) - def _create_function_dict(self, funcs: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: - func_dict = {func.__name__: func for func in funcs} + def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: + func_dict = {func.__name__: func for func in functions} return func_dict def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 63aecae91335..507b9f7c7d70 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -2717,10 +2717,10 @@ def create_message( * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. + cases to represent user-generated messages. * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. + messages from the agent into the + conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.projects.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -2782,10 +2782,10 @@ def create_message( * ``user``\\ : Indicates the message is sent by an actual user and should be used in most - cases to represent user-generated messages. + cases to represent user-generated messages. * ``assistant``\\ : Indicates the message is generated by the agent. Use this value to insert - messages from the agent into - the conversation. Known values are: "user" and "assistant". Required. + messages from the agent into the + conversation. Known values are: "user" and "assistant". Required. :paramtype role: str or ~azure.ai.projects.models.MessageRole :keyword content: The textual content of the initial message. Currently, robust input including images and annotated text may only be provided via @@ -6699,7 +6699,7 @@ def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _mo :param app_insights_resource_url: The AppInsights Azure resource Url. It should have the format: - ``/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resource-name}``. + '/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/microsoft.insights/components/{resourcename}'. Required. :type app_insights_resource_url: str :return: GetAppInsightsResponse. 
The GetAppInsightsResponse is compatible with MutableMapping diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 1df36b0cbac6..db9824441ea9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -298,7 +298,7 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. - If not provided, all connections are listed. + If not provided, all connections are listed. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :return: A list of connection properties :rtype: Iterable[~azure.ai.projects.models._models.ConnectionProperties] diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 0f5dbc6e9efa..15adab5c996a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -102,7 +102,7 @@ async def main(): print("Deleted file") await project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") await project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index 08c717f089f0..a07cadd45e8e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -98,7 +98,7 @@ print("Deleted file") project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl index 0396a22c2db6..ddca8949914d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl +++ b/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl @@ -1,3 +1,3 @@ {"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. 
The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."} -{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nclf = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=clf, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} +{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. 
Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nlog_reg = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=log_reg, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} {"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."} diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py index ca86b7a126de..d31ca722255e 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py @@ -30,7 +30,6 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - if not project_client.diagnostics.enable(): print("Application Insights was not enabled for this project.") print("Enable it via the 'Tracing' tab under 'Tools', in your AI Studio project page.") diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md index cddf5f0e6ff2..b33de5300a94 100644 --- a/sdk/ai/azure-ai-projects/tests/README.md +++ b/sdk/ai/azure-ai-projects/tests/README.md @@ -29,6 +29,13 @@ az login ``` ## Setup for running tests in the `agents` folder +**Note:** The environment variables required by the test are defined in `agentClientPreparer`. **It is important project name to be the part of environment variable!** For example, the project is `azure_ai_projects` and the variable may be called `azure_ai_projects_connection_string`. The variables without `azure_ai_projects` substrings will be ignored according to logic of `EnvironmentVariableLoader`. The values of these variables will be supplied to kwargs of the unit tests, decorated by `EnvironmentVariableLoader` function. + +```bash +set PROJECT_CONNECTION_STRING_AGENTS_TESTS= +set AZURE_AI_PROJECTS_CONNECTION_STRING= +set AZURE_AI_PROJECTS_DATA_PATH= +``` ## Setup for running tests in the `evaluations` folder diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 1c0367f2f417..f262cfe86cdb 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -3,6 +3,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
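Editor's note: the README addition above documents the key convention: `EnvironmentVariableLoader` only forwards environment variables whose names contain the prefix passed as its first positional argument, and it injects them into decorated tests as lower-cased kwargs. A hedged sketch of a preparer and a test that consumes it (the sanitized default value is a placeholder):

```python
import functools
from devtools_testutils import EnvironmentVariableLoader

servicePreparer = functools.partial(
    EnvironmentVariableLoader,
    "azure_ai_projects",  # prefix: variable names must contain the project name
    azure_ai_projects_connection_string=(
        "azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name"
    ),
)

class TestExample:
    @servicePreparer()
    def test_something(self, **kwargs):
        # The loader supplies the matching variable (or its sanitized default).
        conn_str = kwargs.pop("azure_ai_projects_connection_string")
        assert conn_str
```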
# ------------------------------------ +# cSpell:disable import os import json import time @@ -46,7 +47,8 @@ agentClientPreparer = functools.partial( EnvironmentVariableLoader, "azure_ai_project", - project_connection_string_agents_tests="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", + # cSpell:disable-next-line + azure_ai_projects_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm", ) """ agentClientPreparer = functools.partial( @@ -96,13 +98,13 @@ class TestagentClient(AzureRecordedTestCase): # helper function: create client and using environment variables def create_client(self, **kwargs): # fetch environment variables - connection_string = kwargs.pop("project_connection_string_agents_tests") + connection_string = kwargs.pop("azure_ai_projects_connection_string") credential = self.get_credential(AIProjectClient, is_async=False) # create and return client client = AIProjectClient.from_connection_string( credential=credential, - connection=connection_string, + conn_str=connection_string, ) return client diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index 95b7f817c771..ab514fc08f68 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -10,6 +10,7 @@ if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index 599ee2a8a161..d94519b2324f 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -2,6 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
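Editor's note: the `create_client` fix above corrects the keyword argument: `from_connection_string` takes `conn_str`, not `connection`. A minimal construction sketch with a placeholder connection string:

```python
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

# Placeholder; real values come from the AI Studio project page and have the
# form "<region>.api.azureml.ms;<subscription-id>;<resource-group>;<project-name>".
conn_str = "azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name"

project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=conn_str,  # keyword is conn_str, not "connection"
)
```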
# ------------------------------------ +# cSpell:disable import datetime from azure.ai.projects.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py index ce70bc4f52de..c6557d8d49bf 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py @@ -57,4 +57,4 @@ def get_async_client(self, **kwargs) -> AIProjectClient: conn_str=conn_str, logging_enable=LOGGING_ENABLED, ) - return project_client \ No newline at end of file + return project_client diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py index 1d71fa8a1374..9a9b796c452f 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py @@ -17,5 +17,9 @@ def test_diagnostics(self, **kwargs): assert project_client.diagnostics.connection_string == None assert project_client.diagnostics.enable() == True assert project_client.diagnostics.connection_string is not None - assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(project_client.diagnostics.connection_string)) - assert project_client.diagnostics.enable() == True \ No newline at end of file + assert bool( + DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match( + project_client.diagnostics.connection_string + ) + ) + assert project_client.diagnostics.enable() == True diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py index c68854a76926..29d1b0bd5075 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py @@ -17,5 +17,9 @@ async def test_diagnostics_async(self, **kwargs): assert project_client.diagnostics.connection_string == None assert await project_client.diagnostics.enable() == True assert project_client.diagnostics.connection_string is not None - assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(project_client.diagnostics.connection_string)) + assert bool( + DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match( + project_client.diagnostics.connection_string + ) + ) assert await project_client.diagnostics.enable() == True diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 6b1ed698b653..5403c75a96b4 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -17,7 +17,9 @@ def test_inference_get_azure_openai_client(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - with project_client.inference.get_azure_openai_client(api_version="2024-10-01-preview") as azure_openai_client: + with project_client.inference.get_azure_openai_client( + api_version="2024-10-01-preview" + ) as azure_openai_client: response = azure_openai_client.chat.completions.create( messages=[ { diff --git 
a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 3273fe9849d7..df1988c411e8 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -17,7 +17,9 @@ async def test_inference_get_azure_openai_client_async(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") async with self.get_async_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - async with await project_client.inference.get_azure_openai_client(api_version="2024-10-01-preview") as azure_openai_client: + async with await project_client.inference.get_azure_openai_client( + api_version="2024-10-01-preview" + ) as azure_openai_client: response = await azure_openai_client.chat.completions.create( messages=[ { diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index b080c3792844..753943b71b76 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: ff1c3a7b447adbea65e8c6b55914d70f5dcca40a +commit: 42c5291babc24c6fd5dd6ce8ece4b5d84203b082 repo: Azure/azure-rest-api-specs additionalDirectories: From f9967f624d167fd7844539a48187e59dbe9cae08 Mon Sep 17 00:00:00 2001 From: Sai Kothinti Date: Tue, 29 Oct 2024 23:11:06 +0530 Subject: [PATCH 056/138] Disable evaluation schedule and remove sampling rate (#38161) * sdk changes for online eval disable schedule and remove sampling rate * change sample --- .../ai/projects/aio/operations/_operations.py | 18 ++++------ .../azure/ai/projects/models/__init__.py | 2 -- .../azure/ai/projects/models/_models.py | 34 ------------------- .../ai/projects/operations/_operations.py | 28 +++++++-------- .../sample_evaluations_schedules.py | 6 ++-- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 6 files changed, 21 insertions(+), 69 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 811234a2db50..0ab9b59da258 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -80,7 +80,7 @@ build_diagnostics_get_app_insights_request, build_evaluations_create_or_replace_schedule_request, build_evaluations_create_request, - build_evaluations_delete_schedule_request, + build_evaluations_disable_schedule_request, build_evaluations_get_request, build_evaluations_get_schedule_request, build_evaluations_list_request, @@ -6141,11 +6141,10 @@ async def get_next(next_link=None): return AsyncItemPaged(get_next, extract_data) @distributed_trace_async - async def delete_schedule(self, name: str, **kwargs: Any) -> None: - """Resource delete operation template. + async def disable_schedule(self, name: str, **kwargs: Any) -> None: + """Disable the evaluation schedule. - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. + :param name: Name of the evaluation schedule. Required. 
:type name: str :return: None :rtype: None @@ -6164,7 +6163,7 @@ async def delete_schedule(self, name: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_evaluations_delete_schedule_request( + _request = build_evaluations_disable_schedule_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -6191,10 +6190,5 @@ async def delete_schedule(self, name: str, **kwargs: Any) -> None: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore + return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index 933a62c1e5b6..55da8389ac8b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -123,7 +123,6 @@ RunStepSharepointToolCall, RunStepToolCall, RunStepToolCallDetails, - SamplingStrategy, SharepointToolDefinition, SubmitToolOutputsAction, SubmitToolOutputsDetails, @@ -308,7 +307,6 @@ "RunStepSharepointToolCall", "RunStepToolCall", "RunStepToolCallDetails", - "SamplingStrategy", "SharepointToolDefinition", "SubmitToolOutputsAction", "SubmitToolOutputsDetails", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 28d27e0e71f6..d57f25a7b5e7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1052,8 +1052,6 @@ class EvaluationSchedule(_model_base.Model): :vartype evaluators: dict[str, ~azure.ai.projects.models.EvaluatorConfiguration] :ivar trigger: Trigger for the evaluation. Required. :vartype trigger: ~azure.ai.projects.models.Trigger - :ivar sampling_strategy: Sampling strategy for the evaluation. Required. - :vartype sampling_strategy: ~azure.ai.projects.models.SamplingStrategy """ name: str = rest_field(visibility=["read"]) @@ -1076,8 +1074,6 @@ class EvaluationSchedule(_model_base.Model): """Evaluators to be used for the evaluation. Required.""" trigger: "_models.Trigger" = rest_field() """Trigger for the evaluation. Required.""" - sampling_strategy: "_models.SamplingStrategy" = rest_field(name="samplingStrategy") - """Sampling strategy for the evaluation. Required.""" @overload def __init__( @@ -1086,7 +1082,6 @@ def __init__( data: "_models.ApplicationInsightsConfiguration", evaluators: Dict[str, "_models.EvaluatorConfiguration"], trigger: "_models.Trigger", - sampling_strategy: "_models.SamplingStrategy", description: Optional[str] = None, tags: Optional[Dict[str, str]] = None, properties: Optional[Dict[str, str]] = None, @@ -4672,35 +4667,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) -class SamplingStrategy(_model_base.Model): - """SamplingStrategy Definition. - - - :ivar rate: Sampling rate. Required. - :vartype rate: float - """ - - rate: float = rest_field() - """Sampling rate. Required.""" - - @overload - def __init__( - self, - *, - rate: float, - ) -> None: ... 
- - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): """The input definition information for a sharepoint tool as used to configure an agent. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 507b9f7c7d70..2d1a13cfab5d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1473,15 +1473,17 @@ def build_evaluations_list_schedule_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_evaluations_delete_schedule_request(name: str, **kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_evaluations_disable_schedule_request( # pylint: disable=name-too-long + name: str, **kwargs: Any +) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-07-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("apiVersion", "2024-07-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL - _url = "/evaluations/schedules/{name}" + _url = "/evaluations/schedules/{name}/disable" path_format_arguments = { "name": _SERIALIZER.url("name", name, "str"), } @@ -1489,12 +1491,12 @@ def build_evaluations_delete_schedule_request(name: str, **kwargs: Any) -> HttpR _url: str = _url.format(**path_format_arguments) # type: ignore # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _params["apiVersion"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) class AgentsOperations: # pylint: disable=too-many-public-methods @@ -7533,11 +7535,10 @@ def get_next(next_link=None): return ItemPaged(get_next, extract_data) @distributed_trace - def delete_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements - """Resource delete operation template. + def disable_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Disable the evaluation schedule. - :param name: Name of the schedule, which also serves as the unique identifier for the - evaluation. Required. + :param name: Name of the evaluation schedule. Required. 
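On the wire this is now PATCH /evaluations/schedules/{name}/disable rather than DELETE /evaluations/schedules/{name}. Caller-side, a minimal sketch of the renamed method (the updated sample further below does the same; the client setup follows the repo's samples):

import os
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)
# Disables future runs of the schedule; unlike the old delete_schedule,
# this does not remove the schedule resource.
project_client.evaluations.disable_schedule("CANARY-ONLINE-EVAL-TEST-WS-ENV-104")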
:type name: str :return: None :rtype: None @@ -7556,7 +7557,7 @@ def delete_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable= cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_evaluations_delete_schedule_request( + _request = build_evaluations_disable_schedule_request( name=name, api_version=self._config.api_version, headers=_headers, @@ -7583,10 +7584,5 @@ def delete_schedule(self, name: str, **kwargs: Any) -> None: # pylint: disable= map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) - response_headers = {} - response_headers["x-ms-client-request-id"] = self._deserialize( - "str", response.headers.get("x-ms-client-request-id") - ) - if cls: - return cls(pipeline_response, None, response_headers) # type: ignore + return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index e4ad81dac368..18cbb8fb59ac 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -45,7 +45,6 @@ def main(): "f1_score": f1_evaluator_config, } - sampling_strategy = SamplingStrategy(rate=0.2) name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104" description = "Testing Online eval command job in CANARY environment" tags = {"tag1": "value1", "tag2": "value2"} @@ -57,7 +56,6 @@ def main(): data=app_insights_config, evaluators=evaluators, trigger=recurrence_trigger, - sampling_strategy=sampling_strategy, description=description, tags=tags, properties=properties, @@ -75,8 +73,8 @@ def main(): for evaluation_schedule in ai_client.evaluations.list_schedule(): print(evaluation_schedule) - # Sample for delete an evaluation schedule with name - ai_client.evaluations.delete_schedule(name) + # Sample for disable an evaluation schedule with name + ai_client.evaluations.disable_schedule(name) if __name__ == "__main__": diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 753943b71b76..f54fc30b641c 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 42c5291babc24c6fd5dd6ce8ece4b5d84203b082 +commit: d60552bb88d19b2c7cc7978619aeddcc5f9dd602 repo: Azure/azure-rest-api-specs additionalDirectories: From f780407c344f489044c98e4224258521135b3cbb Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 29 Oct 2024 11:22:21 -0700 Subject: [PATCH 057/138] Add tests for Connection operations (#38156) --- .../azure/ai/projects/models/_models.py | 14 +- .../azure/ai/projects/models/_patch.py | 8 +- .../azure/ai/projects/operations/_patch.py | 2 +- .../azure_ai_projects_tests.env | 8 ++ .../async_samples/sample_connections_async.py | 2 - .../samples/connections/sample_connections.py | 2 - .../samples/evaluations/sample_evaluations.py | 2 - ...s_with_azure_ai_inference_client_async.py} | 5 +- ...letions_with_azure_openai_client_async.py} | 4 +- ...s_with_azure_ai_inference_client_async.py} | 7 +- ...letions_with_azure_ai_inference_client.py} | 8 +- ...t_completions_with_azure_openai_client.py} | 4 +- ...et_chat_completions_client_with_tracing.py | 45 ------- ...eddings_with_azure_ai_inference_client.py} | 7 +- 
.../tests/connections/connection_test_base.py | 54 +++++++- .../tests/connections/test_connections.py | 103 ++++++++++++++- .../connections/test_connections_async.py | 123 ++++++++++++++++++ .../tests/inference/inference_test_base.py | 9 +- 18 files changed, 308 insertions(+), 99 deletions(-) rename sdk/ai/azure-ai-projects/samples/inference/async_samples/{sample_get_chat_completions_client_async.py => sample_chat_completions_with_azure_ai_inference_client_async.py} (93%) rename sdk/ai/azure-ai-projects/samples/inference/async_samples/{sample_get_azure_openai_client_async.py => sample_chat_completions_with_azure_openai_client_async.py} (93%) rename sdk/ai/azure-ai-projects/samples/inference/async_samples/{sample_get_embeddings_client_async.py => sample_text_embeddings_with_azure_ai_inference_client_async.py} (87%) rename sdk/ai/azure-ai-projects/samples/inference/{sample_get_chat_completions_client.py => sample_chat_completions_with_azure_ai_inference_client.py} (81%) rename sdk/ai/azure-ai-projects/samples/inference/{sample_get_azure_openai_client.py => sample_chat_completions_with_azure_openai_client.py} (93%) delete mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py rename sdk/ai/azure-ai-projects/samples/inference/{sample_get_embeddings_client.py => sample_text_embeddings_with_azure_ai_inference_client.py} (85%) create mode 100644 sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index d57f25a7b5e7..d11baf407304 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -699,11 +699,11 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ConnectionProperties(_model_base.Model): +class InternalConnectionProperties(_model_base.Model): """Connection properties. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ConnectionPropertiesAADAuth, ConnectionPropertiesApiKeyAuth, ConnectionPropertiesSASAuth + InternalConnectionPropertiesAADAuth, InternalConnectionPropertiesApiKeyAuth, InternalConnectionPropertiesSASAuth :ivar auth_type: Authentication type of the connection target. Required. Known values are: @@ -717,7 +717,7 @@ class ConnectionProperties(_model_base.Model): and \"SAS\".""" -class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): +class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discriminator="AAD"): """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ ). @@ -741,7 +741,7 @@ class ConnectionPropertiesAADAuth(ConnectionProperties, discriminator="AAD"): """The connection URL to be used for this service. Required.""" -class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey"): +class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): """Connection properties for connections with API key authentication. @@ -767,7 +767,7 @@ class ConnectionPropertiesApiKeyAuth(ConnectionProperties, discriminator="ApiKey """The connection URL to be used for this service. 
Required.""" -class ConnectionPropertiesSASAuth(ConnectionProperties, discriminator="SAS"): +class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): """Connection properties for connections with SAS authentication. @@ -1498,14 +1498,14 @@ class GetConnectionResponse(_model_base.Model): :ivar name: The name of the resource. Required. :vartype name: str :ivar properties: The properties of the resource. Required. - :vartype properties: ~azure.ai.projects.models._models.ConnectionProperties + :vartype properties: ~azure.ai.projects.models._models.InternalConnectionProperties """ id: str = rest_field() """A unique identifier for the connection. Required.""" name: str = rest_field() """The name of the resource. Required.""" - properties: "_models._models.ConnectionProperties" = rest_field() + properties: "_models._models.InternalConnectionProperties" = rest_field() """The properties of the resource. Required.""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 75bdbee3f0c7..15f2a6d2c904 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -143,12 +143,11 @@ def __str__(self): out += f' "connection_type": "{self.connection_type}",\n' out += f' "endpoint_url": "{self.endpoint_url}",\n' if self.key: - out += f' "key": "{self.key}",\n' + out += f' "key": "REDACTED"\n' else: - out += f' "key": null,\n' + out += f' "key": null\n' if self.token_credential: - access_token = self.token_credential.get_token("https://cognitiveservices.azure.com/.default") - out += f' "token_credential": "{access_token.token}", expires on {access_token.expires_on} ({datetime.datetime.fromtimestamp(access_token.expires_on, datetime.timezone.utc)})\n' + out += f' "token_credential": "REDACTED"\n' else: out += f' "token_credential": null\n' out += "}\n" @@ -1000,6 +999,7 @@ def until_done(self) -> None: "AsyncFunctionTool", "AsyncToolSet", "CodeInterpreterTool", + "ConnectionProperties", "FileSearchTool", "FunctionTool", "SASTokenCredential", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index db9824441ea9..a93ae8a710da 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -133,7 +133,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient( - endpoint=connection.authentication_type, credential=AzureKeyCredential(connection.key) + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) ) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env index 648c37a65c54..9b887cfc344d 100644 --- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env @@ -2,8 +2,16 @@ # Environment variables required for running tests # +# Default to live tests without recordings: +AZURE_TEST_RUN_LIVE=true +AZURE_SKIP_LIVE_RECORDING=true + # To run Connection tests: AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING= +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME= 
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_SERVERLESS_CONNECTION_NAME= +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AOAI_CONNECTION_NAME= +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_SERVERLESS_CONNECTION_NAME= # To run Inference tests: AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 122c166b3748..1f5dd56b4334 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -4,8 +4,6 @@ # ------------------------------------ """ -FILE: sample_connections_async.py - DESCRIPTION: Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections and get connections properties. diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 50d7fe8da1c1..bbc16950709a 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -4,8 +4,6 @@ # ------------------------------------ """ -FILE: sample_connections.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to enumerate connections and get connection properties. diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index cbd8b213f002..991bd1c56afd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -4,8 +4,6 @@ # ------------------------------------ """ -FILE: sample_agents_basics.py - DESCRIPTION: This sample demonstrates how to use basic agent operations from the Azure Agents service using a synchronous client. diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py similarity index 93% rename from sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py rename to sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index 27e50fd8359d..8c4f2dda13e0 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_chat_completions_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -4,11 +4,10 @@ # ------------------------------------ """ -FILE: sample_get_chat_completions_client_async.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated - async ChatCompletionsClient from the azure.ai.inference package. + async ChatCompletionsClient from the azure.ai.inference package. For more information + on the azure.ai.inference package see https://pypi.org/project/azure-ai-inference/. 
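A side effect of the __str__ change above: printing a connection no longer leaks its key or token, so connections can be logged safely. A sketch of fetching the default Azure OpenAI connection with credentials populated (get_default and ConnectionType appear in the connection tests below; project_client is assumed to be an AIProjectClient built as in the samples):

from azure.ai.projects.models import ConnectionType

connection = project_client.connections.get_default(
    connection_type=ConnectionType.AZURE_OPEN_AI,
    with_credentials=True,
)
print(connection)  # key and token_credential render as "REDACTED"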
USAGE: python sample_get_chat_completions_client_async.py diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py similarity index 93% rename from sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py rename to sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py index 874f789c16f0..7de71c0a7c25 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py @@ -4,8 +4,6 @@ # ------------------------------------ """ -FILE: sample_get_azure_openai_client_async.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated AsyncAzureOpenAI client from the azure.ai.inference package. @@ -17,7 +15,7 @@ pip install azure.ai.projects aiohttp openai_async - Set this environment variable with your own values: + Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py similarity index 87% rename from sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py rename to sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py index c7154a4f6aaa..e14f1647b336 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_get_embeddings_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py @@ -4,11 +4,10 @@ # ------------------------------------ """ -FILE: sample_get_embeddings_client_async.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated - async EmbeddingsClient from the azure.ai.inference package. + async EmbeddingsClient from the azure.ai.inference package. For more information + on the azure.ai.inference package see https://pypi.org/project/azure-ai-inference/. USAGE: python sample_get_embeddings_client_async.py @@ -17,7 +16,7 @@ pip install azure.ai.projects aiohttp azure-identity - Set this environment variable with your own values: + Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import asyncio diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py similarity index 81% rename from sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py rename to sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py index 16c3b40f7b45..8552defe5bca 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py @@ -4,14 +4,14 @@ # ------------------------------------ """ -FILE: sample_get_chat_completions_client.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated - async ChatCompletionsClient from the azure.ai.inference package. + async ChatCompletionsClient from the azure.ai.inference package. For more information + on the azure.ai.inference package see https://pypi.org/project/azure-ai-inference/. USAGE: python sample_get_chat_completions_client.py + python sample_get_chat_completions_client.py Before running the sample: @@ -30,7 +30,7 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: + # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py similarity index 93% rename from sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py rename to sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py index e2dc15e93c59..5b5e794c2e1f 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_azure_openai_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py @@ -4,8 +4,6 @@ # ------------------------------------ """ -FILE: sample_get_azure_openai_client.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated AsyncAzureOpenAI client from the azure.ai.inference package. @@ -17,7 +15,7 @@ pip install azure.ai.projects openai - Set this environment variable with your own values: + Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ import os diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py deleted file mode 100644 index d31ca722255e..000000000000 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_chat_completions_client_with_tracing.py +++ /dev/null @@ -1,45 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. 
-# ------------------------------------ - -""" -FILE: sample_get_chat_completions_client_with_tracing.py - -DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated - async ChatCompletionsClient from the azure.ai.inference package. - -USAGE: - python sample_get_chat_completions_client_with_tracing.py - - Before running the sample: - - pip install azure.ai.projects azure-identity - - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" -import os -from azure.ai.projects import AIProjectClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential - -with AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) as project_client: - - if not project_client.diagnostics.enable(): - print("Application Insights was not enabled for this project.") - print("Enable it via the 'Tracing' tab under 'Tools', in your AI Studio project page.") - exit() - - print(f"Applications Insights connection string = {project_client.diagnostics.connection_string}") - - # Get an authenticated azure.ai.inference chat completions client for your default Serverless connection: - with project_client.inference.get_chat_completions_client() as client: - - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - - print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py similarity index 85% rename from sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py rename to sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py index 61ea0adaa289..05dfde82b25d 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_get_embeddings_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py @@ -4,11 +4,10 @@ # ------------------------------------ """ -FILE: sample_get_embeddings_client.py - DESCRIPTION: Given an AIProjectClient, this sample demonstrates how to get an authenticated - async EmbeddingsClient from the azure.ai.inference package. + async EmbeddingsClient from the azure.ai.inference package. For more information + on the azure.ai.inference package see https://pypi.org/project/azure-ai-inference/. USAGE: python sample_get_embeddings_client.py @@ -17,7 +16,7 @@ pip install azure.ai.projects azure-identity - Set this environment variable with your own values: + Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
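The renamed embeddings sample reduces to the pattern sketched below; note that it depends on the operations/_patch.py fix above, which passes connection.endpoint_url (rather than the authentication type) to EmbeddingsClient. A sketch, assuming project_client as created in the samples:

with project_client.inference.get_embeddings_client() as client:
    response = client.embed(input=["first phrase", "second phrase", "third phrase"])
    for item in response.data:
        print(f"index={item.index}: embedding vector of length {len(item.embedding)}")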
""" import os diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index b37415126e4f..4d21e7bf59af 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -7,19 +7,21 @@ import functools from dotenv import load_dotenv from azure.ai.projects import AIProjectClient +from azure.ai.projects.aio import AIProjectClient as AIProjectClientAsync +from azure.ai.projects.models import ConnectionProperties, ConnectionType, AuthenticationType from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader -""" -Set these environment variables before running the test: -set AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING= -""" + servicePreparerConnectionsTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_connections_tests", azure_ai_projects_connections_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + azure_ai_projects_connections_tests_default_aoai_connection_name="default-aoai-connection-name", + azure_ai_projects_connections_tests_default_serverless_connection_name="default-serverless-connection-name", + azure_ai_projects_connections_tests_aoai_connection_name="aoai-connection-name", + azure_ai_projects_connections_tests_serverless_connection_name="serverless-connection-name", ) - # Set to True to enable SDK logging LOGGING_ENABLED = False @@ -44,3 +46,45 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: logging_enable=LOGGING_ENABLED, ) return project_client + + def get_async_client(self, **kwargs) -> AIProjectClientAsync: + conn_str = kwargs.pop("azure_ai_projects_connections_tests_project_connection_string") + project_client = AIProjectClientAsync.from_connection_string( + credential= self.get_credential(AIProjectClient, is_async=True), + conn_str=conn_str, + logging_enable=LOGGING_ENABLED, + ) + return project_client + + @classmethod + def validate_connection( + cls, + connection: ConnectionProperties, + with_credentials: bool, + *, + expected_connection_type: ConnectionType = None, + expected_connection_name: str = None, + expected_authentication_type: AuthenticationType = None + ): + assert connection.id is not None + + if expected_connection_name: + assert connection.name == expected_connection_name + else: + assert connection.name is not None + + if expected_connection_type: + assert connection.connection_type == expected_connection_type + else: + assert connection.connection_type is not None + + if expected_authentication_type: + assert connection.authentication_type == expected_authentication_type + else: + assert connection.authentication_type is not None + + if with_credentials: + assert (connection.key is not None) ^ (connection.token_credential is not None) + else: + assert connection.key == None + assert connection.token_credential == None diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index d105de6c9c2a..832283818120 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -5,6 +5,7 @@ from devtools_testutils import recorded_by_proxy from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests +from azure.ai.projects.models import ConnectionType # The test 
class name needs to start with "Test" to get collected by pytest @@ -13,14 +14,110 @@ class TestConnections(ConnectionsTestBase): @servicePreparerConnectionsTests() @recorded_by_proxy def test_connections_get(self, **kwargs): - project_client = self.get_sync_client(**kwargs) + + aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") + serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_serverless_connection_name") + + with self.get_sync_client(**kwargs) as project_client: + + connection = project_client.connections.get( + connection_name=aoai_connection, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = project_client.connections.get( + connection_name=aoai_connection, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = project_client.connections.get( + connection_name=serverless_connection, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + connection = project_client.connections.get( + connection_name=serverless_connection, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + @servicePreparerConnectionsTests() @recorded_by_proxy def test_connections_get_default(self, **kwargs): - project_client = self.get_sync_client(**kwargs) + + default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") + default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_default_serverless_connection_name") + + with self.get_sync_client(**kwargs) as project_client: + + connection = project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = project_client.connections.get_default( + connection_type=ConnectionType.SERVERLESS, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + connection = project_client.connections.get_default( + connection_type=ConnectionType.SERVERLESS, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + @servicePreparerConnectionsTests() @recorded_by_proxy def test_connections_list(self, **kwargs): - project_client = 
self.get_sync_client(**kwargs) + with self.get_sync_client(**kwargs) as project_client: + + connections = project_client.connections.list() + count_all = len(connections) + print(f"====> Listing of all connections (found {count_all}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + connections = project_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, + ) + count_aoai = len(connections) + print("====> Listing of all Azure Open AI connections (found {count_aoai}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + connections = project_client.connections.list( + connection_type=ConnectionType.SERVERLESS, + ) + count_serverless = len(connections) + print("====> Listing of all Serverless connections (found {count_serverless}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + assert count_all > 2 + assert count_all > count_aoai + assert count_all > count_serverless + diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py new file mode 100644 index 000000000000..b26f4a4a2a8a --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -0,0 +1,123 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from devtools_testutils.aio import recorded_by_proxy_async +from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests +from azure.ai.projects.models import ConnectionType + + +# The test class name needs to start with "Test" to get collected by pytest +class TestConnectionsAsync(ConnectionsTestBase): + + @servicePreparerConnectionsTests() + @recorded_by_proxy_async + async def test_connections_get(self, **kwargs): + aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") + serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_serverless_connection_name") + + async with self.get_async_client(**kwargs) as project_client: + + connection = await project_client.connections.get( + connection_name=aoai_connection, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = await project_client.connections.get( + connection_name=aoai_connection, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = await project_client.connections.get( + connection_name=serverless_connection, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + connection = await project_client.connections.get( + connection_name=serverless_connection, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + + @servicePreparerConnectionsTests() + 
@recorded_by_proxy_async + async def test_connections_get_default(self, **kwargs): + + default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") + default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_default_serverless_connection_name") + + async with self.get_async_client(**kwargs) as project_client: + + connection = await project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = await project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + + connection = await project_client.connections.get_default( + connection_type=ConnectionType.SERVERLESS, + with_credentials=False + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + connection = await project_client.connections.get_default( + connection_type=ConnectionType.SERVERLESS, + with_credentials=True + ) + print(connection) + ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + + + @servicePreparerConnectionsTests() + @recorded_by_proxy_async + async def test_connections_list_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + + connections = await project_client.connections.list() + count_all = len(connections) + print(f"====> Listing of all connections (found {count_all}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + connections = await project_client.connections.list( + connection_type=ConnectionType.AZURE_OPEN_AI, + ) + count_aoai = len(connections) + print("====> Listing of all Azure Open AI connections (found {count_aoai}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + connections = await project_client.connections.list( + connection_type=ConnectionType.SERVERLESS, + ) + count_serverless = len(connections) + print("====> Listing of all Serverless connections (found {count_serverless}):") + for connection in connections: + print(connection) + ConnectionsTestBase.validate_connection(connection, False) + + assert count_all > 2 + assert count_all > count_aoai + assert count_all > count_serverless + + diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index e9f99199fedd..f413107f2c72 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -9,11 +9,6 @@ from azure.ai.projects.aio import AIProjectClient as AIProjectClientAsync from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader -""" -Set these environment variables before running the test: -set AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING= -set 
AZURE_AI_PROJECTS_INFERENCE_TESTS_MODEL_DEPLOYMENT_NAME= -""" servicePreparerInferenceTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_inference_tests", @@ -47,10 +42,10 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: ) return project_client - def get_async_client(self, **kwargs) -> AIProjectClient: + def get_async_client(self, **kwargs) -> AIProjectClientAsync: conn_str = kwargs.pop("azure_ai_projects_inference_tests_project_connection_string") project_client = AIProjectClientAsync.from_connection_string( - credential=self.get_credential(AIProjectClientAsync, is_async=False), + credential= self.get_credential(AIProjectClientAsync, is_async=True), conn_str=conn_str, logging_enable=LOGGING_ENABLED, ) From 222a07d7318f77a820fb5f6a4dbcbfc059cd8cbe Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Tue, 29 Oct 2024 11:29:28 -0800 Subject: [PATCH 058/138] update to get file content (#38165) --- .../azure/ai/projects/aio/_patch.py | 4 ++- .../ai/projects/aio/operations/_operations.py | 12 ++++---- .../azure/ai/projects/models/__init__.py | 2 -- .../azure/ai/projects/models/_models.py | 29 ------------------- .../ai/projects/operations/_operations.py | 12 ++++---- .../async_samples/sample_evaluations_async.py | 11 +++---- .../samples/evaluations/sample_evaluations.py | 9 +++--- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 8 files changed, 24 insertions(+), 57 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 781d25c372d8..22ecfe1c8c07 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -244,7 +244,8 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: from azure.ai.ml.constants import AssetTypes except ImportError: raise ImportError( - "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`") + "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" + ) data = Data( path=file_path, @@ -273,6 +274,7 @@ def scope(self) -> Dict[str, str]: "project_name": self._config3.project_name, } + __all__: List[str] = [ "AIProjectClient", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 0ab9b59da258..d988d121e51c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -3517,13 +3517,13 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: return deserialized # type: ignore @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: - """Returns information about a specific file. Does not retrieve file content. + async def get_file_content(self, file_id: str, **kwargs: Any) -> bytes: + """Retrieves the raw content of a specific file. :param file_id: The ID of the file to retrieve. Required. :type file_id: str - :return: FileContentResponse. 
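With this change the async method hands callers the raw bytes directly instead of a FileContentResponse wrapper. A sketch of the new calling convention, inside a coroutine, with file_id standing in for an id returned by an earlier upload:

content = await project_client.agents.get_file_content(file_id)
with open("downloaded_file.bin", "wb") as f:
    f.write(content)  # content is plain bytes after this change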
The FileContentResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.FileContentResponse + :return: bytes + :rtype: bytes :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -3537,7 +3537,7 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileCon _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + cls: ClsType[bytes] = kwargs.pop("cls", None) _request = build_agents_get_file_content_request( file_id=file_id, @@ -3574,7 +3574,7 @@ async def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileCon if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileContentResponse, response.json()) + deserialized = _deserialize(bytes, response.json(), format="base64") if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index 55da8389ac8b..cf380fc258cf 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -34,7 +34,6 @@ Evaluation, EvaluationSchedule, EvaluatorConfiguration, - FileContentResponse, FileDeletionStatus, FileListResponse, FileSearchToolDefinition, @@ -218,7 +217,6 @@ "Evaluation", "EvaluationSchedule", "EvaluatorConfiguration", - "FileContentResponse", "FileDeletionStatus", "FileListResponse", "FileSearchToolDefinition", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index d11baf407304..794cbb4b7342 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1137,35 +1137,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class FileContentResponse(_model_base.Model): - """A response from a file get content operation. - - - :ivar content: The content of the file, in bytes. Required. - :vartype content: bytes - """ - - content: bytes = rest_field(format="base64") - """The content of the file, in bytes. Required.""" - - @overload - def __init__( - self, - *, - content: bytes, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class FileDeletionStatus(_model_base.Model): """A status response from a file deletion operation. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 2d1a13cfab5d..d85f3ac1f10c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -4913,13 +4913,13 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: return deserialized # type: ignore @distributed_trace - def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentResponse: - """Returns information about a specific file. Does not retrieve file content. 
+ def get_file_content(self, file_id: str, **kwargs: Any) -> bytes: + """Retrieves the raw content of a specific file. :param file_id: The ID of the file to retrieve. Required. :type file_id: str - :return: FileContentResponse. The FileContentResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.FileContentResponse + :return: bytes + :rtype: bytes :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -4933,7 +4933,7 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentRe _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.FileContentResponse] = kwargs.pop("cls", None) + cls: ClsType[bytes] = kwargs.pop("cls", None) _request = build_agents_get_file_content_request( file_id=file_id, @@ -4970,7 +4970,7 @@ def get_file_content(self, file_id: str, **kwargs: Any) -> _models.FileContentRe if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.FileContentResponse, response.json()) + deserialized = _deserialize(bytes, response.json(), format="base64") if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index e27e1a54c528..2cedf876e06a 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -58,16 +58,15 @@ async def main(): # id=RelevanceEvaluator.id, id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3", init_params={ - "model_config": default_connection.to_evaluator_model_config(deployment_name=deployment_name, - api_version=api_version) + "model_config": default_connection.to_evaluator_model_config( + deployment_name=deployment_name, api_version=api_version + ) }, ), "violence": EvaluatorConfiguration( # id=ViolenceEvaluator.id, id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3", - init_params={ - "azure_ai_project": project_client.scope - }, + init_params={"azure_ai_project": project_client.scope}, ), }, ) @@ -86,7 +85,5 @@ async def main(): print("----------------------------------------------------------------") - - if __name__ == "__main__": asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index 991bd1c56afd..223968489d63 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -58,16 +58,15 @@ # id=RelevanceEvaluator.id, id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3", init_params={ - "model_config": default_connection.to_evaluator_model_config(deployment_name=deployment_name, - api_version=api_version) + "model_config": default_connection.to_evaluator_model_config( + deployment_name=deployment_name, api_version=api_version + ) }, ), "violence": EvaluatorConfiguration( # id=ViolenceEvaluator.id, id="azureml://registries/azureml-staging/models/Violent-Content-Evaluator/versions/3", - init_params={ - "azure_ai_project": project_client.scope - }, + init_params={"azure_ai_project": project_client.scope}, ), }, ) diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml 
b/sdk/ai/azure-ai-projects/tsp-location.yaml index f54fc30b641c..5a069d0df760 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: d60552bb88d19b2c7cc7978619aeddcc5f9dd602 +commit: d46982df7fb73959661c21c4ee47d56935ea8e22 repo: Azure/azure-rest-api-specs additionalDirectories: From 83dbacf25bee6cdde4192500657bc05ded4934d0 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Tue, 29 Oct 2024 15:07:01 -0800 Subject: [PATCH 059/138] Jhakulin/parse messages (#38167) * parse messages * update * add test file * update * use stream for get file content * add get_file_content_stream * update * fix merge error * review feedback * more review comments * more review comments --- .../azure/ai/projects/models/_patch.py | 147 ++++- .../azure/ai/projects/operations/_patch.py | 129 ++++- .../agents/nifty_500_quarterly_results.csv | 502 ++++++++++++++++++ ...t.py => sample_agents_code_interpreter.py} | 44 +- .../agents/sample_agents_file_search.py | 2 +- 5 files changed, 792 insertions(+), 32 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv rename sdk/ai/azure-ai-projects/samples/agents/{sample_agents_code_interpreter_attachment.py => sample_agents_code_interpreter.py} (62%) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 15f2a6d2c904..003d0024d0b0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -35,10 +35,16 @@ CodeInterpreterToolDefinition, CodeInterpreterToolResource, RequiredFunctionToolCall, + OpenAIPageableListOfThreadMessage, + ThreadMessage, + MessageTextContent, + MessageImageFileContent, + MessageTextFileCitationAnnotation, + MessageTextFilePathAnnotation, ) from abc import ABC, abstractmethod -from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin +from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin, Union logger = logging.getLogger(__name__) @@ -379,20 +385,33 @@ async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: class FileSearchTool(Tool): """ A tool that searches for uploaded file information from the created vector stores. - """ - def __init__(self, vector_store_ids: List[str] = []): - self.vector_store_ids = vector_store_ids + :param vector_store_ids: A list of vector store IDs to search for files. + :type vector_store_ids: list[str] + """ + def __init__(self, vector_store_ids: Optional[List[str]] = None): + if vector_store_ids is None: + self.vector_store_ids = set() + else: + self.vector_store_ids = set(vector_store_ids) - def add_vector_store(self, store_id: str): + def add_vector_store(self, store_id: str) -> None: """ Add a vector store ID to the list of vector stores to search for files. + + :param store_id: The ID of the vector store to search for files. + :type store_id: str + """ - self.vector_store_ids.append(store_id) + self.vector_store_ids.add(store_id) - def remove_vector_store(self, store_id: str): + def remove_vector_store(self, store_id: str) -> None: """ Remove a vector store ID from the list of vector stores to search for files. + + :param store_id: The ID of the vector store to remove. 
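Backing the id collections with sets makes repeated add_* calls idempotent. A sketch of wiring the updated CodeInterpreterTool into an agent (the create_agent keyword arguments follow the shape used in the repo's agent samples; the model name and uploaded_file are placeholders):

from azure.ai.projects.models import CodeInterpreterTool

code_interpreter = CodeInterpreterTool(file_ids=[uploaded_file.id])
code_interpreter.add_file(uploaded_file.id)  # no-op: the id is already in the set

agent = project_client.agents.create_agent(
    model="gpt-4-1106-preview",  # placeholder deployment name
    name="my-agent",
    instructions="You are a helpful agent",
    tools=code_interpreter.definitions,
    tool_resources=code_interpreter.resources,
)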
+        :type store_id: str
+
         """
         self.vector_store_ids.remove(store_id)

@@ -408,7 +427,7 @@ def resources(self) -> ToolResources:
         """
         Get the file search resources.
         """
-        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=self.vector_store_ids))
+        return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids)))

     def execute(self, tool_call: Any) -> Any:
         pass

@@ -417,24 +436,31 @@ def execute(self, tool_call: Any) -> Any:
 class CodeInterpreterTool(Tool):
     """
     A tool that interprets code files uploaded to the agent.
-    """
-    def __init__(self):
-        self.file_ids = []
+    :param file_ids: A list of file IDs to interpret.
+    :type file_ids: list[str]
+    """
+    def __init__(self, file_ids: Optional[List[str]] = None):
+        if file_ids is None:
+            self.file_ids = set()
+        else:
+            self.file_ids = set(file_ids)

-    def add_file(self, file_id: str):
+    def add_file(self, file_id: str) -> None:
         """
         Add a file ID to the list of files to interpret.

         :param file_id: The ID of the file to interpret.
-        """
-        self.file_ids.append(file_id)
+        :type file_id: str
+        """
+        self.file_ids.add(file_id)

-    def remove_file(self, file_id: str):
+    def remove_file(self, file_id: str) -> None:
         """
         Remove a file ID from the list of files to interpret.

         :param file_id: The ID of the file to remove.
+        :type file_id: str
         """
         self.file_ids.remove(file_id)

@@ -450,7 +476,7 @@ def resources(self) -> ToolResources:
         """
         Get the code interpreter resources.
         """
-        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=self.file_ids))
+        return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids)))

     def execute(self, tool_call: Any) -> Any:
         pass

@@ -991,6 +1017,94 @@ def until_done(self) -> None:
         pass

+
+class ThreadMessages:
+    """
+    Represents a collection of messages in a thread.
+
+    :param pageable_list: The pageable list of messages.
+    :type pageable_list: ~azure.ai.projects.models.OpenAIPageableListOfThreadMessage
+
+    :return: A collection of messages.
+    :rtype: ~azure.ai.projects.models.ThreadMessages
+    """
+    def __init__(self, pageable_list: OpenAIPageableListOfThreadMessage):
+        self._messages = pageable_list.data
+
+    @property
+    def messages(self) -> List[ThreadMessage]:
+        """Returns all messages in the collection."""
+        return self._messages
+
+    @property
+    def text_messages(self) -> List[MessageTextContent]:
+        """Returns all text message contents in the messages."""
+        texts = [content for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent)]
+        return texts
+
+    @property
+    def image_contents(self) -> List[MessageImageFileContent]:
+        """Returns all image file contents from image message contents in the messages."""
+        return [
+            content for msg in self._messages
+            for content in msg.content
+            if isinstance(content, MessageImageFileContent)
+        ]
+
+    @property
+    def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]:
+        """Returns all file citation annotations from text message annotations in the messages."""
+        annotations = [
+            annotation for msg in self._messages
+            for content in msg.content
+            if isinstance(content, MessageTextContent)
+            for annotation in content.text.annotations
+            if isinstance(annotation, MessageTextFileCitationAnnotation)
+        ]
+        return annotations
+
+    @property
+    def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]:
+        """Returns all file path annotations from text message annotations in the messages."""
+        annotations = [
+            annotation for msg in self._messages
+            for content in msg.content
+            if isinstance(content, MessageTextContent)
+            for annotation in content.text.annotations
+            if isinstance(annotation, MessageTextFilePathAnnotation)
+        ]
+        return annotations
+
+    def get_last_message_by_sender(self, sender: str) -> Optional[ThreadMessage]:
+        """Returns the last message from the specified sender.
+
+        :param sender: The role of the sender.
+        :type sender: str
+
+        :return: The last message from the specified sender, or None if no such message exists.
+        :rtype: ~azure.ai.projects.models.ThreadMessage or None
+        """
+        for msg in self._messages:
+            if msg.role == sender:
+                return msg
+        return None
+
+    def get_last_text_message_by_sender(self, sender: str) -> Optional[MessageTextContent]:
+        """Returns the last text message from the specified sender.
+
+        :param sender: The role of the sender.
+        :type sender: str
+
+        :return: The last text message from the specified sender, or None if no such message exists.
+        :rtype: ~azure.ai.projects.models.MessageTextContent or None
+        """
+        for msg in self._messages:
+            if msg.role == sender:
+                for content in msg.content:
+                    if isinstance(content, MessageTextContent):
+                        return content
+        return None
+
+
 __all__: List[str] = [
     "AgentEventHandler",
     "AgentRunStream",
@@ -1000,6 +1114,7 @@ def until_done(self) -> None:
     "AsyncToolSet",
     "CodeInterpreterTool",
     "ConnectionProperties",
+    "ThreadMessages",
     "FileSearchTool",
     "FunctionTool",
     "SASTokenCredential",
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
index a93ae8a710da..f5946344739b 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
@@ -10,8 +10,8 @@
 import sys, io, logging, os, time
 from io import IOBase
 from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast
+from pathlib import Path
-# from zoneinfo import ZoneInfo

 from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated
 from ._operations import AgentsOperations as AgentsOperationsGenerated
 from ._operations import DiagnosticsOperations as DiagnosticsOperationsGenerated
@@ -358,6 +358,7 @@ def enable(self, **kwargs) -> bool:

 class AgentsOperations(AgentsOperationsGenerated):
+
     @overload
     def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent:
         """Creates a new agent.
@@ -1973,7 +1974,7 @@ def create_vector_store_file_batch_and_poll(
     def create_vector_store_file_batch_and_poll(
         self,
         vector_store_id: str,
-        body: Union[JSON, IO[bytes]] = None,
+        body: Union[JSON, IO[bytes], None] = None,
         *,
         file_ids: List[str] = _Unset,
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
@@ -2013,6 +2014,130 @@ def create_vector_store_file_batch_and_poll(
         )
         return vector_store_file_batch

+
+    @distributed_trace
+    def get_file_content_stream(self, file_id: str, **kwargs: Any) -> Iterator[bytes]:
+        """
+        Returns the file content as a byte stream for the given file_id.
+
+        :param file_id: The ID of the file to retrieve. Required.
+        :type file_id: str
+        :return: An iterator that yields bytes from the file content.
+        :rtype: Iterator[bytes]
+        :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails.
+        """
+        kwargs["stream"] = True
+        response = super().get_file_content(file_id, **kwargs)
+        return cast(Iterator[bytes], response)
+
+    @distributed_trace
+    def get_messages(
+        self,
+        thread_id: str,
+        *,
+        run_id: Optional[str] = None,
+        limit: Optional[int] = None,
+        order: Optional[Union[str, _models.ListSortOrder]] = None,
+        after: Optional[str] = None,
+        before: Optional[str] = None,
+        **kwargs: Any
+    ) -> _models.ThreadMessages:
+        """Parses the OpenAIPageableListOfThreadMessage response and returns a ThreadMessages object.
+
+        :param thread_id: Identifier of the thread. Required.
+        :type thread_id: str
+        :keyword run_id: Filter messages by the run ID that generated them. Default value is None.
+        :paramtype run_id: str
+        :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and
+         100, and the default is 20. Default value is None.
+        :paramtype limit: int
+        :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order
+         and desc for descending order. Known values are: "asc" and "desc". Default value is None.
+        :paramtype order: str or ~azure.ai.projects.models.ListSortOrder
+        :keyword after: A cursor for use in pagination. after is an object ID that defines your place
+         in the list. For instance, if you make a list request and receive 100 objects, ending with
+         obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the
+         list. Default value is None.
+        :paramtype after: str
+        :keyword before: A cursor for use in pagination. before is an object ID that defines your place
+         in the list. For instance, if you make a list request and receive 100 objects, ending with
+         obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of
+         the list. Default value is None.
+        :paramtype before: str
+
+        :return: ThreadMessages. A helper object wrapping the parsed list of thread messages.
+        :rtype: ~azure.ai.projects.models.ThreadMessages
+        """
+        messages = super().list_messages(thread_id, run_id=run_id, limit=limit, order=order, after=after, before=before, **kwargs)
+        return _models.ThreadMessages(pageable_list=messages)
+
+    @distributed_trace
+    def save_file(
+        self,
+        file_id: str,
+        file_name: str,
+        target_dir: Optional[Union[str, Path]] = None
+    ) -> None:
+        """
+        Saves file content retrieved using a file identifier to the specified local directory.
+
+        :param file_id: The unique identifier for the file to retrieve.
+        :type file_id: str
+        :param file_name: The name of the file to be saved.
+        :type file_name: str
+        :param target_dir: The directory where the file should be saved. Defaults to the current working directory.
+        :type target_dir: Optional[Union[str, Path]]
+        """
+        # Determine target directory
+        path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd()
+        logger.debug(f"Using target directory: {path}")
+
+        if not path.exists():
+            logger.debug(f"Creating non-existent target directory: {path}")
+            path.mkdir(parents=True, exist_ok=True)
+        elif not path.is_dir():
+            error_msg = f"The target path '{path}' is not a directory."
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+        # Ensure file_name is properly sanitized
+        file_name = Path(file_name).name
+        if not file_name:
+            error_msg = "The provided file name is invalid."
+            logger.error(error_msg)
+            raise ValueError(error_msg)
+
+        # Get file content
+        try:
+            file_content_stream = self.get_file_content_stream(file_id)
+            if not file_content_stream:
+                error_msg = f"No content retrievable for file ID '{file_id}'."
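+                # get_file_content_stream is expected to return a lazy iterator;
+                # a falsy value here most likely means the service produced no
+                # usable body for this file_id, so log and raise before anything
+                # is written to disk.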
+                logger.error(error_msg)
+                raise RuntimeError(error_msg)
+        except Exception as e:
+            error_msg = f"Failed to retrieve file content for file ID '{file_id}': {e}"
+            logger.error(error_msg)
+            raise RuntimeError(error_msg) from e
+
+        # Path to save the file
+        target_file_path = path / file_name
+
+        # Write file content directly from the generator, ensuring each chunk is bytes
+        try:
+            with target_file_path.open("wb") as file:
+                for chunk in file_content_stream:
+                    if isinstance(chunk, (bytes, bytearray)):
+                        file.write(chunk)
+                    else:
+                        raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}")
+            logger.debug(f"File '{file_name}' saved successfully at '{target_file_path}'.")
+        except TypeError as e:
+            logger.error(f"Failed due to unexpected chunk type: {e}")
+            raise
+        except IOError as e:
+            error_msg = f"Failed to write to file '{target_file_path}': {e}"
+            logger.error(error_msg)
+            raise

 __all__: List[str] = [
diff --git a/sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv b/sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv
new file mode 100644
index 000000000000..e02068e09042
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv
@@ -0,0 +1,502 @@
+name,NSE_code,BSE_code,sector,industry,revenue,operating_expenses,operating_profit,operating_profit_margin,depreciation,interest,profit_before_tax,tax,net_profit,EPS,profit_TTM,EPS_TTM
+3M India Ltd.,3MINDIA,523395,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,057",847.4,192.1,18.48%,12.9,0.7,195.9,49.8,146.1,129.7,535.9,475.7
+ACC Ltd.,ACC,500410,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,644.8","3,885.4",549.3,12.39%,212.8,28.9,517.7,131.5,387.9,20.7,"1,202.7",64
+AIA Engineering Ltd.,AIAENG,532683,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,357.1",912.7,382.1,29.51%,24.5,7.4,412.5,88.4,323.1,34.3,"1,216.1",128.9
+APL Apollo Tubes Ltd.,APLAPOLLO,533758,METALS & MINING,IRON & STEEL PRODUCTS,"4,65","4,305.4",325,7.02%,41.3,26.6,276.7,73.8,202.9,7.3,767.5,27.7
+Au Small Finance Bank Ltd.,AUBANK,540611,BANKING AND FINANCE,BANKS,"2,956.5","1,026.7",647.7,25.59%,0,"1,282.1",533.4,131.5,401.8,6,"1,606.2",24
+Adani Ports & Special Economic Zone Ltd.,ADANIPORTS,532921,TRANSPORTATION,MARINE PORT & SERVICES,"6,951.9","2,982.4","3,664",55.13%,974.5,520.1,"2,474.9",759,"1,747.8",8.1,"6,337",29.3
+Adani Energy Solutions Ltd.,ADANIENSOL,ASM,UTILITIES,ELECTRIC UTILITIES,"3,766.5","2,169.3","1,504.6",40.95%,432.1,640.8,369.9,84.9,275.9,2.5,"1,315.1",11.8
+Aditya Birla Fashion and Retail Ltd.,ABFRL,535755,RETAILING,DEPARTMENT STORES,"3,272.2","2,903.6",322.9,10.01%,388.8,208.4,-228.6,-28.2,-179.2,-1.9,-491.7,-5.2
+Aegis Logistics Ltd.,AEGISCHEM,500003,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,279.3","1,026.5",208.3,16.87%,34.1,26.6,192,42,127,3.6,509,14.5
+Ajanta Pharma Ltd.,AJANTPHARM,532331,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,049.8",737.8,290.7,28.26%,33.7,2.3,275.9,80.6,195.3,15.5,660.2,52.3
+Alembic Pharmaceuticals Ltd.,APLLTD,533573,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,605.1","1,386.7",208.2,13.06%,67.6,15.7,135.1,-1.9,136.6,7,531.7,27
+Alkem Laboratories Ltd.,ALKEM,539523,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,503.4","2,693.4",746.7,21.71%,73.9,30.3,648,33.1,620.5,51.9,"1,432.9",119.9
+Amara Raja Energy & Mobility Ltd.,ARE&M,500008,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,988.6","2,556.9",402.5,13.60%,115.7,6.2,309.8,83.5,226.3,13.2,779.8,45.7
+Ambuja Cements Ltd.,AMBUJACEM,500425,CEMENT AND
CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"7,9","6,122.1","1,301.8",17.54%,380.9,61.2,"1,335.7",352.5,793,4,"2,777.9",14 +Apollo Hospitals Enterprise Ltd.,APOLLOHOSP,508869,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"4,869.1","4,219.4",627.5,12.95%,163.4,111.3,376.9,130.2,232.9,16.2,697.5,48.5 +Apollo Tyres Ltd.,APOLLOTYRE,500877,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"6,304.9","5,119.8","1,159.8",18.47%,360.3,132.8,679.9,205.8,474.3,7.5,"1,590.7",25 +Ashok Leyland Ltd.,ASHOKLEY,500477,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"11,463","9,558.6","1,870.4",16.37%,226.6,715.1,924.4,358,526,1.8,"2,141.5",7.3 +Asian Paints Ltd.,ASIANPAINT,500820,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"8,643.8","6,762.3","1,716.2",20.24%,208.7,50.9,"1,621.8",418.6,"1,205.4",12.6,"5,062.6",52.8 +Astral Ltd.,ASTRAL,532830,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"1,376.4","1,142.9",220.1,16.15%,48.7,8,176.8,45.1,131.2,4.9,549.7,20.4 +Atul Ltd.,ATUL,500027,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,215.8","1,038.5",155.2,13.00%,54,1.9,121.5,32.5,90.3,30.6,392.3,132.9 +Aurobindo Pharma Ltd.,AUROPHARMA,524804,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,406.4","5,846","1,373.4",19.02%,417.5,68.2,"1,074.7",323.7,757.2,12.8,"2,325.5",39.7 +Avanti Feeds Ltd.,AVANTIFEED,512573,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"1,312","1,184.5",94,7.35%,14.3,0.2,113,30.5,74.2,5.5,336.4,24.7 +Avenue Supermarts Ltd.,DMART,540376,RETAILING,DEPARTMENT STORES,"12,661.3","11,619.4","1,005",7.96%,174.4,15.6,851.9,228.6,623.6,9.6,"2,332.1",35.8 +Axis Bank Ltd.,AXISBANK,532215,BANKING AND FINANCE,BANKS,"33,122.2","9,207.3","9,166",33.43%,0,"14,749","8,313.8","2,096.1","6,204.1",20.1,"13,121",42.6 +Bajaj Auto Ltd.,BAJAJ-AUTO,532977,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"11,206.8","8,708.1","2,130.1",19.65%,91.8,6.5,"2,400.4",564,"2,02",71.4,"6,841.6",241.8 +Bajaj Finance Ltd.,BAJFINANCE,500034,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"13,381.8","2,851.5","9,449.7",70.63%,158.5,"4,537.1","4,757.6","1,207","3,550.8",58.7,"13,118.5",216.7 +Bajaj Finserv Ltd.,BAJAJFINSV,532978,DIVERSIFIED,HOLDING COMPANIES,"26,022.7","14,992.2","9,949.9",38.24%,208.8,"4,449.1","5,292","1,536.5","1,929",12.1,"7,422.6",46.6 +Bajaj Holdings & Investment Ltd.,BAJAJHLDNG,500490,DIVERSIFIED,HOLDING COMPANIES,240.1,33.5,191.2,85.08%,8.4,0.2,197.9,73.9,"1,491.2",134,"5,545.1",498.3 +Balkrishna Industries Ltd.,BALKRISIND,502355,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"2,360.3","1,720.5",532.7,23.64%,160.4,23.9,455.5,108.1,347.4,18,"1,047.5",54.2 +Balrampur Chini Mills Ltd.,BALRAMCHIN,500038,FOOD BEVERAGES & TOBACCO,SUGAR,"1,649","1,374.6",164.9,10.71%,41.2,17.2,215.9,56.6,166.3,8.2,540.5,26.8 +Bank of Baroda,BANKBARODA,532134,BANKING AND FINANCE,BANKS,"35,766","8,430.4","9,807.9",33.52%,0,"17,527.7","6,022.8","1,679.7","4,458.4",8.5,"18,602.9",35.9 +Bank of India,BANKINDIA,532149,BANKING AND FINANCE,BANKS,"16,779.4","3,704.9","3,818.8",25.35%,0,"9,255.7","2,977.4","1,488.6","1,498.5",3.6,"5,388.7",13.1 +Bata India Ltd.,BATAINDIA,500043,RETAILING,FOOTWEAR,834.6,637.5,181.7,22.18%,81.7,28.4,46.1,12.1,34,2.6,289.7,22.5 +Berger Paints (India) Ltd.,BERGEPAINT,509480,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"2,782.6","2,293.7",473.6,17.12%,82.9,21.1,385,96.7,291.6,2.5,"1,032.6",8.9 +Bharat Electronics Ltd.,BEL,500049,GENERAL INDUSTRIALS,DEFENCE,"4,146.1","2,994.9","1,014.2",25.30%,108.3,1.5,"1,041.5",260.7,789.4,1.1,"3,323",4.5 +Bharat Forge 
Ltd.,BHARATFORG,500493,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"3,826.7","3,152.8",621.4,16.47%,211.3,124.3,336.1,121.8,227.2,4.9,783.7,16.8 +Bharat Heavy Electricals Ltd.,BHEL,500103,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,305.4","5,513",-387.7,-7.56%,59.9,180.4,-447.9,-197.9,-238.1,-0.7,71.3,0.2 +Bharat Petroleum Corporation Ltd.,BPCL,500547,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"103,72","90,103.9","12,940.5",12.56%,"1,605.3",973.2,"10,755.7","2,812.2","8,243.5",38.7,"27,505.3",129.2 +Bharti Airtel Ltd.,BHARTIARTL,532454,TELECOM SERVICES,TELECOM SERVICES,"37,374.2","17,530.1","19,513.7",52.68%,"9,734.3","5,185.8","3,353.7","1,846.5","1,340.7",2.4,"7,547",13.2 +Indus Towers Ltd.,INDUSTOWER,534816,TELECOM SERVICES,OTHER TELECOM SERVICES,"7,229.7","3,498.8","3,633.7",50.95%,"1,525.6",458.6,"1,746.7",452,"1,294.7",4.8,"3,333.5",12.4 +Biocon Ltd.,BIOCON,532523,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,"3,620.2","2,720.7",741.6,21.42%,389.3,247.7,238.5,41.6,125.6,1.1,498.4,4.2 +Birla Corporation Ltd.,BIRLACORPN,500335,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,313.2","1,997",288.9,12.64%,143.5,95.4,77.1,18.8,58.4,7.6,153.1,19.9 +Blue Dart Express Ltd.,BLUEDART,526612,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"1,329.7","1,101.8",222.7,16.82%,110.6,19.5,97.9,24.8,73.1,30.8,292.4,123.2 +Blue Star Ltd.,BLUESTARCO,500067,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,903.4","1,767.7",122.7,6.49%,23,17.6,95,24.3,70.7,3.6,437.7,21.3 +Bombay Burmah Trading Corporation Ltd.,BBTC,501425,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,"4,643.5","3,664.7",859.2,18.99%,74.7,154.6,697.1,212.6,122,17.5,"-1,499.5",-214.8 +Bosch Ltd.,BOSCHLTD,500530,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,284.3","3,638.8",491.3,11.90%,101.3,12.2,"1,317",318.1,999.8,339,"2,126.9",721 +Brigade Enterprises Ltd.,BRIGADE,532929,REALTY,REALTY,"1,407.9","1,041.8",324.8,23.77%,75.7,110,180.3,67.8,133.5,5.8,298.2,12.9 +Britannia Industries Ltd.,BRITANNIA,500825,FMCG,PACKAGED FOODS,"4,485.2","3,560.5",872.4,19.68%,71.7,53.4,799.7,212.1,587.6,24.4,"2,536.2",105.3 +CCL Products India Ltd.,CCL,519600,FOOD BEVERAGES & TOBACCO,TEA & COFFEE,608.3,497.7,109.9,18.09%,22.6,18.4,69.7,8.8,60.9,4.6,279.9,21 +Crisil Ltd.,CRISIL,500092,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,771.8,544.2,191.7,26.05%,26.5,0.8,200.3,48.3,152,20.8,606.3,82.9 +Zydus Lifesciences Ltd.,ZYDUSLIFE,532321,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"4,422.8","3,222.7","1,146.1",26.23%,184.2,8.7,"1,007.2",226.4,800.7,7.9,"2,807.1",27.7 +Can Fin Homes Ltd.,CANFINHOME,511196,BANKING AND FINANCE,HOUSING FINANCE,871,49.7,749.2,86.01%,2.8,548.4,198,39.9,158.1,11.9,658.8,49.5 +Canara Bank,CANBK,532483,BANKING AND FINANCE,BANKS,"33,891.2","8,250.3","7,706.6",28.24%,0,"17,934.3","5,098","1,420.6","3,86",20.9,"13,968.4",77 +Carborundum Universal Ltd.,CARBORUNIV,513375,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,166",978.8,167.5,14.61%,45.9,4.9,136.4,43.7,101.9,5.4,461.3,24.3 +Castrol India Ltd.,CASTROLIND,500870,OIL & GAS,OIL MARKETING & DISTRIBUTION,"1,203.2",914.4,268.6,22.70%,22.9,2.4,263.5,69.1,194.4,2,815.5,8.2 +Ceat Ltd.,CEATLTD,500878,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"3,063.8","2,597.2",456.1,14.94%,124.5,71.7,270.4,68.3,208,51.4,521.7,129 +Central Bank of India,CENTRALBK,532885,BANKING AND FINANCE,BANKS,"8,438.5","2,565.4","1,535.4",20.81%,0,"4,337.7",567.2,-41.5,622,0.7,"2,181.4",2.5 +Century Plyboards (India) Ltd.,CENTURYPLY,532548,FOREST MATERIALS,FOREST 
PRODUCTS,"1,011.4",852.5,144.3,14.47%,23.4,6.1,129.4,32.2,96.9,4.4,380.7,17.1 +Cera Sanitaryware Ltd.,CERA,532443,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,476.2,387.2,76.5,16.49%,8.9,1.4,77.2,19.8,56.9,43.8,232.4,178.7 +Chambal Fertilisers & Chemicals Ltd.,CHAMBLFERT,500085,FERTILIZERS,FERTILIZERS,"5,467.3","4,770.5",615,11.42%,78.4,45.8,572.6,200.2,381,9.2,"1,137.7",27.3 +Cholamandalam Investment & Finance Company Ltd.,CHOLAFIN,511243,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,695.2",987.6,"3,235.1",69.99%,38.5,"2,204.2","1,065",288.8,772.9,9.4,"3,022.8",36.7 +Cipla Ltd.,CIPLA,500087,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"6,854.5","4,944.4","1,733.8",25.96%,290,25.8,"1,594.2",438.4,"1,130.9",14,"3,449.1",42.7 +City Union Bank Ltd.,CUB,532210,BANKING AND FINANCE,BANKS,"1,486.1",333.9,386.6,29.65%,0,765.6,330.6,50,280.6,3.8,943.8,12.7 +Coal India Ltd.,COALINDIA,533278,METALS & MINING,COAL,"34,760.3","24,639.4","8,137",24.83%,"1,178.2",182.5,"8,760.2","2,036.5","6,799.8",11,"28,059.6",45.5 +Colgate-Palmolive (India) Ltd.,COLPAL,500830,FMCG,PERSONAL PRODUCTS,"1,492.1",989,482.1,32.77%,44.3,1.1,457.8,117.8,340.1,12.5,"1,173.2",43.1 +Container Corporation of India Ltd.,CONCOR,531344,COMMERCIAL SERVICES & SUPPLIES,WAREHOUSING AND LOGISTICS,"2,299.8","1,648.4",546.5,24.90%,153.1,16.5,481.8,119,367.4,6,"1,186.2",19.5 +Coromandel International Ltd.,COROMANDEL,506395,FERTILIZERS,FERTILIZERS,"7,032.9","5,929.4","1,058.7",15.15%,54,46.2,"1,003.3",245,756.9,25.7,"2,024.2",68.8 +Crompton Greaves Consumer Electricals Ltd.,CROMPTON,539876,CONSUMER DURABLES,HOUSEHOLD APPLIANCES,"1,797.2","1,607.8",174.5,9.79%,32.1,21.5,135.8,34.9,97.2,1.5,432,6.7 +Cummins India Ltd.,CUMMINSIND,500480,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"2,011.3","1,575.4",346.2,18.02%,38.3,6.8,390.9,99.6,329.1,11.9,"1,445.5",52.1 +Cyient Ltd.,CYIENT,532175,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,792","1,452.7",325.8,18.32%,65.8,27,240.3,56.7,178.3,16.3,665.6,60.1 +DCM Shriram Ltd.,DCMSHRIRAM,523367,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"2,73","2,593.9",114.1,4.21%,74,14.7,47.5,15.2,32.2,2.1,617.6,39.4 +DLF Ltd.,DLF,532868,REALTY,REALTY,"1,476.4",885.3,462.4,34.31%,37,90.2,464,112.2,622.8,2.5,"2,239",9 +Dabur India Ltd.,DABUR,500096,FMCG,PERSONAL PRODUCTS,"3,320.2","2,543",660.9,20.63%,98.3,28.1,650.8,144.3,515,2.9,"1,755.7",9.9 +Delta Corp Ltd.,DELTACORP,532848,COMMERCIAL SERVICES & SUPPLIES,MISC. COMMERCIAL SERVICES,282.6,170.5,100.1,36.99%,16.9,2.7,92.4,23,69.4,2.6,273.3,10.2 +Divi's Laboratories Ltd.,DIVISLAB,532488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,995","1,43",479,25.09%,95,1,469,121,348,13.1,"1,331.8",50.3 +Dr. Lal Pathlabs Ltd.,LALPATHLAB,539524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,619.4,423.5,177.8,29.57%,35.9,7.8,152.2,41.5,109.3,13.2,301.4,36.1 +Dr. 
Reddy's Laboratories Ltd.,DRREDDY,500124,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"7,217.6","4,888.8","2,008.3",29.09%,375.5,35.3,"1,912.5",434.5,"1,482.2",89.1,"5,091.2",305.2 +EID Parry (India) Ltd.,EIDPARRY,500125,FOOD BEVERAGES & TOBACCO,OTHER FOOD PRODUCTS,"9,210.3","8,002","1,057.5",11.67%,101.2,74.2,"1,032.8",246.8,452.3,25.5,991,55.8 +Eicher Motors Ltd.,EICHERMOT,505200,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"4,388.3","3,027.4","1,087.2",26.42%,142.5,12.7,"1,205.7",291.1,"1,016.2",37.1,"3,581",130.8 +Emami Ltd.,EMAMILTD,531162,FMCG,PERSONAL PRODUCTS,876,631.2,233.7,27.02%,46.1,2.2,196.4,15.8,178.5,4.1,697.8,16 +Endurance Technologies Ltd.,ENDURANCE,540153,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,560.5","2,226.7",318.3,12.51%,118.4,9.8,205.6,51.1,154.6,11,562.8,40 +Engineers India Ltd.,ENGINERSIN,532178,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,833.6,691.3,98.5,12.47%,8.3,0.4,133.6,32.2,127.5,2.3,472.7,8.4 +Escorts Kubota Ltd.,ESCORTS,500495,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"2,154.4","1,798.6",260.7,12.66%,40.8,3.1,311.9,79.7,223.3,20.6,910.5,82.4 +Exide Industries Ltd.,EXIDEIND,500086,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"4,408.9","3,872.4",499.1,11.42%,141.5,29.7,365.3,95.2,269.4,3.2,872.7,10.3 +Federal Bank Ltd.,FEDERALBNK,500469,BANKING AND FINANCE,BANKS,"6,548.2","1,603.8","1,400.3",24.18%,0,"3,544.1","1,342.7",342.6,994.1,4.3,"3,671.4",15.6 +Finolex Cables Ltd.,FINCABLES,500144,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,229.3","1,041.3",146.1,12.30%,10.8,0.4,176.7,52.3,154.2,10.1,643.9,42.1 +Finolex Industries Ltd.,FINPIPE,500940,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,944.5,780.2,103,11.66%,27.4,12.5,124.5,35.4,98,1.6,459.3,7.4 +Firstsource Solutions Ltd.,FSL,532809,SOFTWARE & SERVICES,BPO/KPO,"1,556.9","1,311.2",228.8,14.86%,65.4,26.1,154.3,27.8,126.5,1.9,551.7,7.9 +GAIL (India) Ltd.,GAIL,532155,UTILITIES,UTILITIES,"33,191","29,405.5","3,580.2",10.85%,837.3,199.6,"2,748.7",696.3,"2,444.1",3.7,"5,283.8",8 +GlaxoSmithKline Pharmaceuticals Ltd.,GLAXO,500660,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,985.2,667.5,289.5,30.25%,18.1,0.4,299.2,81.7,217.5,12.8,647.8,38.2 +Glenmark Pharmaceuticals Ltd.,GLENMARK,532296,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"3,209.1","2,745.1",462.3,14.41%,141.5,121.5,-124.4,55.9,-81.9,-2.9,-196.3,-7 +Godrej Consumer Products Ltd.,GODREJCP,532424,FMCG,PERSONAL PRODUCTS,"3,667.9","2,897.8",704.2,19.55%,60.9,77.3,619.4,186.6,432.8,4.2,"1,750.1",17.1 +Godrej Industries Ltd.,GODREJIND,500164,DIVERSIFIED,DIVERSIFIED,"4,256.9","3,672.1",265.5,6.74%,89.3,333.1,162.4,75.9,87.3,2.6,880,26.1 +Godrej Properties Ltd.,GODREJPROP,533150,REALTY,REALTY,605.1,404.7,-61.7,-17.98%,7.4,48,145.1,38.8,66.8,2.4,662.6,23.8 +Granules India Ltd.,GRANULES,532482,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,191",976.5,213,17.90%,52.5,26,136,33.9,102.1,4.2,393.9,16.3 +Great Eastern Shipping Company Ltd.,GESHIP,500620,TRANSPORTATION,SHIPPING,"1,461.5",585.6,643.4,52.35%,186.7,77.1,611.9,17.3,594.7,41.6,"2,520.1",176.5 +Gujarat Alkalies & Chemicals Ltd.,GUJALKALI,530001,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,042.3",926.1,45.2,4.65%,95.2,10.8,10.2,-0.1,-18.4,-2.5,82.7,11.3 +Gujarat Gas Ltd.,GUJGASLTD,539336,UTILITIES,UTILITIES,"4,019.3","3,494.5",496.6,12.44%,117.9,7.8,399.1,102.9,296.2,4.3,"1,254.3",18.2 +Gujarat Narmada Valley Fertilizers & Chemicals Ltd.,GNFC,500670,FERTILIZERS,FERTILIZERS,"2,232","1,911",169,8.12%,78,1,242,64,182,11.7,932,60.1 +Gujarat Pipavav Port 
Ltd.,GPPL,533248,TRANSPORTATION,MARINE PORT & SERVICES,270.4,102,150.6,59.64%,28.8,2.2,141.1,53.4,92.3,1.9,341.8,7.1 +Gujarat State Fertilizer & Chemicals Ltd.,GSFC,500690,FERTILIZERS,FERTILIZERS,"3,313.2","2,881.4",237.3,7.61%,45.7,1.6,387,78.1,308.9,7.8,"1,056.2",26.5 +Gujarat State Petronet Ltd.,GSPL,532702,UTILITIES,UTILITIES,"4,455.9","3,497.2",913.7,20.72%,165,14.5,779.2,198.7,454.6,8.1,"1,522",27 +HCL Technologies Ltd.,HCLTECH,532281,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"27,037","20,743","5,929",22.23%,"1,01",156,"5,128","1,295","3,832",14.2,"15,445",56.9 +HDFC Bank Ltd.,HDFCBANK,500180,BANKING AND FINANCE,BANKS,"107,566.6","42,037.6","24,279.1",32.36%,0,"41,249.9","20,967.4","3,655","16,811.4",22.2,"54,474.6",71.8 +Havells India Ltd.,HAVELLS,517354,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,952.8","3,527",373.4,9.57%,81.2,9.3,335.3,86.2,249.1,4,"1,177.7",18.8 +Hero MotoCorp Ltd.,HEROMOTOCO,500182,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,741.2","8,173.5","1,359.5",14.26%,187.1,25,"1,355.6",353.1,"1,006.3",50.3,"3,247.6",162.5 +HFCL Ltd.,HFCL,500183,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,128.7",978.9,132.6,11.93%,21.4,34.8,93.5,24,69.4,0.5,305.5,2.1 +Hindalco Industries Ltd.,HINDALCO,500440,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"54,632","48,557","5,612",10.36%,"1,843","1,034","3,231","1,035","2,196",9.9,"8,423",37.9 +Hindustan Copper Ltd.,HINDCOPPER,513599,METALS & MINING,COPPER,392.6,260.2,121.2,31.77%,45.6,4.1,82.6,21.9,60.7,0.6,320.5,3.3 +Hindustan Petroleum Corporation Ltd.,HINDPETRO,500104,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"96,093.4","87,512","8,24",8.61%,"1,247.3",590,"6,744.1","1,616","5,827",41.1,"16,645",117.3 +Hindustan Unilever Ltd.,HINDUNILVR,500696,FMCG,PERSONAL PRODUCTS,"15,806","11,826","3,797",24.30%,297,88,"3,59",931,"2,656",11.3,"10,284",43.8 +Hindustan Zinc Ltd.,HINDZINC,500188,METALS & MINING,ZINC,"7,014","3,652","3,139",46.22%,825,232,"2,305",576,"1,729",4.1,"8,432",20 +Housing and Urban Development Corporation Ltd.,HUDCO,540530,BANKING AND FINANCE,HOUSING FINANCE,"1,880.8",82.7,"1,809.6",97.04%,2.4,"1,216.8",606.4,154.7,451.6,2.3,"1,790.7",8.9 +ITC Ltd.,ITC,500875,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO PRODUCTS,"18,439.3","11,320.2","6,454.2",36.31%,453,9.9,"6,656.2","1,700.3","4,898.1",3.9,"20,185.1",16.2 +ICICI Bank Ltd.,ICICIBANK,532174,BANKING AND FINANCE,BANKS,"57,292.3","23,911","15,473.2",39.74%,0,"17,908","14,824.2","3,808.8","11,805.6",15.6,"41,086.8",58.7 +ICICI Prudential Life Insurance Company Ltd.,ICICIPRULI,540133,BANKING AND FINANCE,LIFE INSURANCE,"17,958.1","17,612.3",-229.6,-1.32%,0,0,340.2,32.5,243.9,1.7,906.9,6.3 +IDBI Bank Ltd.,IDBI,500116,BANKING AND FINANCE,BANKS,"7,063.7","1,922.3","2,175.3",36.02%,0,"2,966.1","2,396.9","1,003.7","1,385.4",1.3,"4,776.3",4.4 +IDFC First Bank Ltd.,IDFCFIRSTB,539437,BANKING AND FINANCE,BANKS,"8,765.8","3,849","1,511.2",20.54%,0,"3,405.6",982.8,236,746.9,1.1,"2,911.1",4.3 +IDFC Ltd.,IDFC,532659,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),36.7,6,30.6,83.56%,0,0,30.6,6.6,223.5,1.4,"4,147.1",25.9 +IRB Infrastructure Developers Ltd.,IRB,532947,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,874.5",950.4,794.6,45.54%,232.7,434.6,256.9,85.8,95.7,0.2,501,0.8 +ITI Ltd.,ITI,523610,TELECOMMUNICATIONS EQUIPMENT,TELECOM EQUIPMENT,256.1,299.3,-52.8,-21.42%,13.3,69.3,-125.8,0,-126,-1.3,-388.4,-4 +Vodafone Idea Ltd.,IDEA,532822,TELECOM SERVICES,TELECOM SERVICES,"10,750.8","6,433.5","4,282.8",39.97%,"5,667.3","6,569","-7,919",817.7,"-8,737.9",-1.8,"-30,986.8",-6.4 +India 
Cements Ltd.,INDIACEM,530005,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,272.4","1,26",4.4,0.35%,55,60.4,-103,-17.4,-80.1,-2.6,-261.1,-8.4 +Indiabulls Housing Finance Ltd.,IBULHSGFIN,535789,BANKING AND FINANCE,HOUSING FINANCE,"2,242.3",190.6,"1,779.2",79.88%,22.9,"1,349.8",421.6,123.6,298,6.5,"1,146",24.3 +Indian Bank,INDIANB,532814,BANKING AND FINANCE,BANKS,"15,929.4","3,599.1","4,327.7",31.44%,0,"8,002.6","2,776.7",768.6,"2,068.5",16.6,"6,893.3",55.3 +Indian Hotels Company Ltd.,INDHOTEL,500850,HOTELS RESTAURANTS & TOURISM,HOTELS,"1,480.9","1,078.4",354.8,24.75%,111.2,59,232.2,72.3,166.9,1.2,"1,100.3",7.7 +Indian Oil Corporation Ltd.,IOC,530965,OIL & GAS,OIL MARKETING & DISTRIBUTION,"179,752.1","156,013.1","23,328.4",13.01%,"3,609.6","2,135","18,090.2","4,699.7","13,114.3",9.5,"38,614.3",27.3 +Indian Overseas Bank,IOB,532388,BANKING AND FINANCE,BANKS,"6,941.5","1,785.1","1,679.8",28.84%,0,"3,476.6",635.5,8.3,627.2,0.3,"2,341.9",1.2 +Indraprastha Gas Ltd.,IGL,532514,UTILITIES,UTILITIES,"3,520.2","2,801.6",656.9,18.99%,102.2,2.5,613.9,151.4,552.7,7.9,"1,806.2",25.8 +IndusInd Bank Ltd.,INDUSINDBK,532187,BANKING AND FINANCE,BANKS,"13,529.7","3,449.9","3,908.7",34.75%,0,"6,171.1","2,934.9",732.9,"2,202.2",28.4,"8,333.7",107.2 +Info Edge (India) Ltd.,NAUKRI,532777,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792,421.2,204.7,32.70%,25.9,8.2,382.8,68.7,205.1,15.9,-25.6,-2 +InterGlobe Aviation Ltd.,INDIGO,539448,TRANSPORTATION,AIRLINES,"15,502.9","12,743.6","2,200.3",14.72%,"1,549","1,021.3",189.1,0.2,188.9,4.9,"5,621.3",145.7 +Ipca Laboratories Ltd.,IPCALAB,524494,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,072.5","1,712.7",321.3,15.80%,90.3,44.1,225.4,87.9,145.1,5.7,492.2,19.4 +J B Chemicals & Pharmaceuticals Ltd.,JBCHEPHARM,506943,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,889.4,638.2,243.5,27.62%,32.2,10.4,208.7,58.1,150.6,9.7,486.6,31.4 +JK Cement Ltd.,JKCEMENT,532644,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,782.1","2,285.8",467,16.96%,137.1,115,244.2,65.7,178.1,23.1,444,57.5 +JK Lakshmi Cement Ltd.,JKLAKSHMI,500380,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,588.5","1,357.3",217.3,13.80%,56.6,33.6,141,45.1,92.7,7.9,357.6,30.4 +JM Financial Ltd.,JMFINANCIL,523405,DIVERSIFIED,HOLDING COMPANIES,"1,214",407.9,662.6,55.34%,13.2,388.1,277.9,72.4,194.9,2,608.1,6.4 +JSW Energy Ltd.,JSWENERGY,533148,UTILITIES,ELECTRIC UTILITIES,"3,387.4","1,379","1,880.4",57.69%,408.7,513.7,"1,085.9",235.1,850.2,5.2,"1,591.7",9.7 +JSW Steel Ltd.,JSWSTEEL,500228,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"44,821","36,698","7,886",17.69%,"2,019","2,084","4,609","1,812","2,76",11.4,"9,252",38.1 +Jindal Stainless Ltd.,JSL,532508,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"9,829","8,566.5","1,230.6",12.56%,221.9,155.6,985.7,229.1,774.3,9.4,"2,600.2",31.6 +Jindal Steel & Power Ltd.,JINDALSTEL,532286,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"12,282","9,964.5","2,285.7",18.66%,603.7,329.4,"1,384.5",-5.8,"1,387.8",13.8,"4,056",40.4 +Jubilant Foodworks Ltd.,JUBLFOOD,533155,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,"1,375.7","1,091.4",277.2,20.25%,141.9,56.8,85.5,23.3,97.2,1.5,235,3.6 +Just Dial Ltd.,JUSTDIAL,535648,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,318.5,211.8,48.8,18.71%,12.2,2.4,92.1,20.3,71.8,8.4,314.1,36.9 +Jyothy Labs Ltd.,JYOTHYLAB,532926,FMCG,PERSONAL PRODUCTS,745.6,597,135.4,18.48%,12.3,1.2,135.1,31.1,104.2,2.8,326.9,8.9 +KRBL Ltd.,KRBL,530813,FMCG,PACKAGED FOODS,"1,246.5","1,018.9",194.5,16.03%,19.9,0.8,206.8,53.6,153.3,6.5,671.4,29.3 +Kajaria Ceramics 
Ltd.,KAJARIACER,500233,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,129.9",941.9,179.7,16.02%,36.1,4.3,147.7,36.6,108,6.8,397.8,25 +Kalpataru Projects International Ltd.,KPIL,522287,UTILITIES,ELECTRIC UTILITIES,"4,53","4,148",370,8.19%,113,137,132,42,89,5.5,478,29.9 +Kansai Nerolac Paints Ltd.,KANSAINER,500165,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,"1,978.6","1,683.3",273.2,13.97%,47.4,7.6,240.3,64.8,177.2,2.2,"1,118.8",13.8 +Karur Vysya Bank Ltd.,KARURVYSYA,590003,BANKING AND FINANCE,BANKS,"2,336",616.4,637.9,31.94%,0,"1,081.7",511.5,133.1,378.4,4.7,"1,364.2",17 +KEC International Ltd.,KEC,532714,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"4,514.9","4,224.7",274.3,6.10%,46.5,177.8,65.8,9.9,55.8,2.2,187.9,7.3 +Kotak Mahindra Bank Ltd.,KOTAKBANK,500247,BANKING AND FINANCE,BANKS,"21,559.5","9,681","6,343",46.24%,0,"5,535.5","5,888.3","1,465.5","4,461",22.4,"17,172.7",86.4 +L&T Finance Holdings Ltd.,L&TFH,533519,DIVERSIFIED,HOLDING COMPANIES,"3,482.1",935.3,"1,882.4",58.57%,28.3,"1,324.9",797.4,203.2,595.1,2.4,"2,080.8",8.4 +L&T Technology Services Ltd.,LTTS,540115,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,427.7","1,910.9",475.6,19.93%,68.1,12.6,436.1,120.2,315.4,29.8,"1,239.7",117.5 +LIC Housing Finance Ltd.,LICHSGFIN,500253,BANKING AND FINANCE,HOUSING FINANCE,"6,765.9",250.6,"6,095.7",90.10%,13.2,"4,599.9","1,483",291.2,"1,193.5",21.7,"4,164.5",75.7 +Lakshmi Machine Works Ltd.,LAXMIMACH,500252,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,"1,355.5","1,184.5",136,10.30%,23.6,0,147.4,32.3,115.1,107.8,416,389.5 +Laurus Labs Ltd.,LAURUSLABS,540222,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,226.2","1,036.6",187.9,15.34%,93.4,42.4,53.9,14.6,37,0.7,367.8,6.8 +Lupin Ltd.,LUPIN,500257,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"5,079","4,120.8",917.8,18.21%,247.8,80.6,629.7,134.3,489.5,10.8,"1,331.2",29.2 +MMTC Ltd.,MMTC,513377,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,-167.2,-180.1,-30.4,14.42%,0.8,1.1,12.1,1.5,52,0.3,174.1,1.2 +MRF Ltd.,MRF,500290,AUTOMOBILES & AUTO COMPONENTS,AUTO TYRES & RUBBER PRODUCTS,"6,287.8","5,060.2","1,156.9",18.61%,351.5,85.5,790.6,203.9,586.7,1383.3,"1,690.9",3988 +Mahanagar Gas Ltd.,MGL,539957,UTILITIES,UTILITIES,"1,772.7","1,250.1",478.9,27.70%,65.8,2.5,454.3,115.8,338.5,34.3,"1,147.8",116.2 +Mahindra & Mahindra Financial Services Ltd.,M&MFIN,532720,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,863.5","1,077.5","2,109.3",55.03%,67.1,"1,703.4",369.1,96,281.1,2.3,"1,982.5",16 +Mahindra & Mahindra Ltd.,M&M,500520,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"35,027.2","28,705.9","5,729.6",16.64%,"1,138.6","1,835.2","3,347.5","1,083.7","2,347.8",21.1,"11,169.4",100.2 +Mahindra Holidays & Resorts India Ltd.,MHRIL,533088,HOTELS RESTAURANTS & TOURISM,HOTELS,672.2,519.3,136,20.76%,83.8,33.3,35.8,14,21.3,1.1,66,3.3 +Manappuram Finance Ltd.,MANAPPURAM,531213,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"2,174",555.6,"1,481.3",68.68%,62.5,689.4,746.7,186.1,558.4,6.6,"1,859.8",22 +Mangalore Refinery And Petrochemicals Ltd.,MRPL,500109,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"22,904.7","20,705.6","2,138.2",9.36%,296,311.2,"1,592",546.2,"1,051.7",6,"3,784.9",21.6 +Marico Ltd.,MARICO,531642,FMCG,PERSONAL PRODUCTS,"2,514","1,979",497,20.07%,39,20,476,116,353,2.7,"1,41",10.9 +Maruti Suzuki India Ltd.,MARUTI,532500,AUTOMOBILES & AUTO COMPONENTS,CARS & UTILITY VEHICLES,"37,902.1","32,282.5","4,790.3",12.92%,794.4,35.1,"4,790.1","1,083.8","3,764.3",124.6,"11,351.8",375.9 +Max Financial Services 
Ltd.,MFSL,500271,BANKING AND FINANCE,LIFE INSURANCE,"10,189.1","10,024.6",143.9,1.42%,0.8,9.4,158.2,-12.1,147.9,4.3,506.4,14.7 +UNO Minda Ltd.,UNOMINDA,532539,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"3,630.2","3,219.8",401.6,11.09%,125.4,27.2,257.9,73.3,225,3.9,742.4,13 +Motilal Oswal Financial Services Ltd.,MOTILALOFS,532892,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"1,650.7",724.1,904.5,55.18%,17.3,241.1,657.6,124.2,531.2,35.9,"1,449.3",97.8 +MphasiS Ltd.,MPHASIS,526299,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"3,325.5","2,680.9",595.6,18.18%,89,34,521.7,129.7,391.9,20.8,"1,605.6",85.1 +Muthoot Finance Ltd.,MUTHOOTFIN,533398,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"3,631.9",723.4,"2,801.6",77.69%,22.2,"1,335","1,470.2",374.9,"1,059.6",26.4,"3,982.9",99.2 +Natco Pharma Ltd.,NATCOPHARM,524816,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,060.8",573.4,458,44.41%,43.6,4.2,439.6,70.6,369,20.6,"1,127.4",63 +NBCC (India) Ltd.,NBCC,534309,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,129.1","1,957.7",95.5,4.65%,1.3,0,104.6,22.9,79.6,0.4,332.2,1.8 +NCC Ltd.,NCC,500294,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"4,746.4","4,415.9",303.7,6.44%,53.2,153.5,123.8,38.8,77.3,1.2,599.4,9.5 +NHPC Ltd.,NHPC,533098,UTILITIES,ELECTRIC UTILITIES,"3,113.8","1,173.9","1,757.4",59.95%,294.9,104.8,"1,618.3",-75,"1,545.8",1.5,"3,897.8",3.9 +Coforge Ltd.,COFORGE,532541,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,285.1","1,935.3",340.9,14.98%,77.2,31.9,240.7,52.8,187.9,29.6,696.2,113.2 +NLC India Ltd.,NLCINDIA,513683,UTILITIES,ELECTRIC UTILITIES,"3,234","2,143",834.6,28.03%,455.1,213.9,"1,700.6",614.7,"1,084.7",7.8,"1,912.3",13.8 +NTPC Ltd.,NTPC,532555,UTILITIES,ELECTRIC UTILITIES,"45,384.6","32,303.2","12,680.2",28.19%,"4,037.7","2,920.5","6,342.9","2,019.7","4,614.6",4.8,"19,125.2",19.7 +Narayana Hrudayalaya Ltd.,NH,539551,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,323.6",997.1,308.1,23.61%,55.3,22.9,248.4,21.7,226.6,11.2,737.5,36.1 +National Aluminium Company Ltd.,NATIONALUM,532234,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"3,112","2,646.9",396.5,13.03%,186.2,4,275,68.7,187.3,1,"1,272.4",6.9 +Navin Fluorine International Ltd.,NAVINFLUOR,532504,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,494.9,373.4,98.3,20.84%,24.2,20,77.2,16.6,60.6,12.2,365,73.7 +Oberoi Realty Ltd.,OBEROIRLTY,533273,REALTY,REALTY,"1,243.8",579.2,638.2,52.42%,11.3,56.5,596.8,142.1,456.8,12.6,"1,961.3",53.9 +Oil And Natural Gas Corporation Ltd.,ONGC,500312,OIL & GAS,EXPLORATION & PRODUCTION,"149,388.5","118,618.4","28,255.3",19.24%,"6,698.1","2,603.3","21,564.9","5,633.6","13,734.1",10.9,"43,072.5",34.2 +Oil India Ltd.,OIL,533106,OIL & GAS,EXPLORATION & PRODUCTION,"9,200.1","5,293.3","3,523.2",39.96%,499,278.9,762,67.6,420.7,3.9,"5,874.5",54.2 +Oracle Financial Services Software Ltd.,OFSS,532466,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,509.6",886.4,558.1,38.64%,19,8,596.2,178.8,417.4,48.2,"1,835.1",211.9 +PI Industries Ltd.,PIIND,523642,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"2,163.8","1,565.5",551.4,26.05%,80.3,7.8,510.2,31.7,480.5,31.7,"1,495.8",98.4 +PNB Housing Finance Ltd.,PNBHOUSING,540173,BANKING AND FINANCE,HOUSING FINANCE,"1,779.4",158.8,"1,574.1",88.54%,11.3,"1,057.3",507.1,124.1,383,14.8,"1,278.7",49.3 +PNC Infratech Ltd.,PNCINFRA,539150,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,932.4","1,511.6",399.8,20.92%,40.9,161.3,218.6,70.7,147.9,5.8,614.3,23.9 +PVR INOX Ltd.,PVRINOX,532689,RETAILING,SPECIALTY 
RETAIL,"2,023.7","1,293.1",706.8,35.34%,308.6,200.3,221.7,55.5,166.3,17,-232.5,-23.7 +Page Industries Ltd.,PAGEIND,532827,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,"1,126.8",891.6,233.5,20.76%,24.6,11.2,199.4,49.1,150.3,134.7,510.7,457.9 +Persistent Systems Ltd.,PERSISTENT,533179,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"2,449","2,006.5",405.2,16.80%,74.4,12.3,355.8,92.5,263.3,35,981.5,127.6 +Petronet LNG Ltd.,PETRONET,532522,OIL & GAS,OIL MARKETING & DISTRIBUTION,"12,686.2","11,317.9","1,214.7",9.69%,194.8,74.7,"1,098.8",283.9,855.7,5.7,"3,490.3",23.3 +Pfizer Ltd.,PFIZER,500680,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,611.3,392.6,182.6,31.75%,15.4,2.7,200.5,51.6,149,32.6,522.8,114.3 +Phoenix Mills Ltd.,PHOENIXLTD,503100,REALTY,REALTY,906.6,361.2,506,57.82%,65.9,96.5,375.2,71.4,252.6,14.2,923.6,51.7 +Pidilite Industries Ltd.,PIDILITIND,500331,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"3,107.6","2,396.3",679.7,22.10%,75.2,13.1,623,163.1,450.1,8.8,"1,505.5",29.6 +Power Finance Corporation Ltd.,PFC,532810,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"22,403.7",315.4,"22,941.9",102.46%,12.7,"14,313.1","8,628.8","2,000.6","4,833.1",14.7,"17,946.4",54.4 +Power Grid Corporation of India Ltd.,POWERGRID,532898,UTILITIES,ELECTRIC UTILITIES,"11,530.4","1,358.7","9,908.4",87.94%,"3,277","2,341.3","4,393.4",573.7,"3,781.4",4.1,"15,344.4",16.5 +Prestige Estates Projects Ltd.,PRESTIGE,ASM,REALTY,REALTY,"3,256","1,643.9",592.5,26.49%,174.1,263.9,"1,174.1",256.4,850.9,21.2,"1,714",42.8 +Prism Johnson Ltd.,PRSMJOHNSN,500338,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"1,846","1,745.4",92.4,5.03%,95.2,43.5,210,30.4,182.7,3.6,154.2,3.1 +Procter & Gamble Hygiene & Healthcare Ltd.,PGHH,500459,FMCG,PERSONAL PRODUCTS,"1,154.1",853.5,284.9,25.03%,14.3,1.9,284.5,73.8,210.7,64.9,734.4,226.3 +Punjab National Bank,PNB,532461,BANKING AND FINANCE,BANKS,"29,857","6,798.1","6,239.1",23.23%,0,"16,819.8","2,778.3","1,013.8","1,990.2",1.8,"5,904.8",5.4 +Quess Corp Ltd.,QUESS,539978,SOFTWARE & SERVICES,BPO/KPO,"4,763.5","4,584.8",163.6,3.44%,69.7,28.1,79.3,8.3,71.9,4.8,240.9,16.2 +RBL Bank Ltd.,RBLBANK,540065,BANKING AND FINANCE,BANKS,"3,720.6","1,422.6",765.4,25.45%,0,"1,532.6",125,-206.1,331.1,5.5,"1,173.9",19.5 +Radico Khaitan Ltd.,RADICO,532497,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,925.7,803.8,121.2,13.10%,26.1,12.5,83.3,21.4,64.8,4.8,237,17.7 +Rain Industries Ltd.,RAIN,500339,CHEMICALS & PETROCHEMICALS,PETROCHEMICALS,"4,208.9","3,794.3",366,8.80%,192.5,241.7,-19.5,46.2,-90.2,-2.7,270.4,8 +Rajesh Exports Ltd.,RAJESHEXPO,531500,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"38,079.4","38,015.8",50.1,0.13%,10.7,0,53,7.7,45.3,1.5,"1,142.2",38.7 +Rallis India Ltd.,RALLIS,500355,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,837,699,133,15.99%,26,3,110,28,82,4.2,98.4,5.2 +Rashtriya Chemicals & Fertilizers Ltd.,RCF,524230,FERTILIZERS,FERTILIZERS,"4,222.1","4,049.3",105.9,2.55%,56.1,44,72.8,21.1,51,0.9,523.6,9.5 +Redington Ltd.,REDINGTON,532805,COMMERCIAL SERVICES & SUPPLIES,COMMODITY TRADING & DISTRIBUTION,"22,296.6","21,738.7",481.4,2.17%,43.7,105.8,408.3,96.7,303.5,3.9,"1,242",15.9 +Relaxo Footwears Ltd.,RELAXO,530517,RETAILING,FOOTWEAR,725.9,623.8,91.5,12.79%,36.9,4.7,60.4,16.2,44.2,1.8,193.9,7.8 +Reliance Industries Ltd.,RELIANCE,500325,OIL & GAS,REFINERIES/PETRO-PRODUCTS,"238,797","193,988","40,968",17.44%,"12,585","5,731","26,493","6,673","17,394",25.7,"68,496",101.2 +REC Ltd.,RECLTD,532955,BANKING AND FINANCE,FINANCE (INCLUDING 
NBFCS),"11,701.3",275.1,"12,180.5",104.21%,6.1,"7,349.8","4,837.6","1,047.7","3,789.9",14.4,"12,738.6",48.4 +SJVN Ltd.,SJVN,533206,UTILITIES,ELECTRIC UTILITIES,951.6,172.2,706.2,80.40%,101.9,124.2,567.7,129.2,439.6,1.1,"1,016",2.6 +SKF India Ltd.,SKFINDIA,500472,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,"1,145.5","1,003.7",121.5,10.80%,19.3,0.5,122,31.7,90,18.2,484,97.9 +SRF Ltd.,SRF,503806,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"3,206.5","2,551.2",626.2,19.71%,161.2,79.3,414.8,114,300.8,10.2,"1,733.4",58.5 +Sanofi India Ltd.,SANOFI,500674,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,726.4,506.1,208.5,29.17%,9.9,0.3,210.1,57.9,152.1,66.1,596.3,259.3 +Schaeffler India Ltd.,SCHAEFFLER,505790,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,879.2","1,506.3",342,18.50%,55.6,1.6,315.7,80.7,235,15,922.6,59 +Shree Cements Ltd.,SHREECEM,500387,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"4,932.1","3,914.1",886,18.46%,411.7,67,539.2,92.6,446.6,123.8,"1,826.8",506.3 +Shriram Finance Ltd.,SHRIRAMFIN,511218,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"8,893","1,409.4","6,334.3",71.30%,141.4,"3,798","2,404.2",614.9,"1,786.1",47.6,"6,575.4",175.2 +Siemens Ltd.,SIEMENS,500550,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"5,953.2","5,107.5",700.2,12.06%,78.6,4.9,762.2,190.5,571.3,16.1,"1,960.9",55.1 +Sobha Ltd.,SOBHA,532784,REALTY,REALTY,773.6,665.8,75.4,10.18%,19.3,63.9,24.7,9.7,14.9,1.6,107.4,11.3 +Solar Industries India Ltd.,SOLARINDS,532725,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"1,355.2","1,011.3",336.1,24.95%,33.7,24.9,285.3,75.5,200.1,22.1,808.2,89.3 +Sonata Software Ltd.,SONATSOFTW,532221,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,935.8","1,715.2",197.3,10.32%,33.3,20.7,166.5,42.3,124.2,9,475.7,34.3 +State Bank of India,SBIN,500112,BANKING AND FINANCE,BANKS,"144,256.1","58,597.6","22,703.3",21.14%,0,"62,955.2","21,935.7","5,552.5","17,196.2",18,"69,304.1",77.7 +Steel Authority of India (SAIL) Ltd.,SAIL,500113,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"29,858.2","25,836.7","3,875.4",13.04%,"1,326.6",605.2,"1,674.7",464.2,"1,305.6",3.2,"3,219.5",7.8 +Sun Pharma Advanced Research Company Ltd.,SPARC,532872,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,29.7,112.7,-91.5,-431.87%,3.2,0.3,-86.4,0,-86.4,-2.7,-253.6,-7.8 +Sun Pharmaceutical Industries Ltd.,SUNPHARMA,524715,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"12,486","9,013","3,179.4",26.08%,632.8,49.3,"2,790.9",390.1,"2,375.5",9.9,"8,548.5",35.6 +Sun TV Network Ltd.,SUNTV,532733,MEDIA,BROADCASTING & CABLE TV,"1,160.2",320.6,727.8,69.42%,218.8,1.7,619.1,154.4,464.7,11.8,"1,861.8",47.2 +Sundram Fasteners Ltd.,SUNDRMFAST,500403,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,429.1","1,191.1",230.7,16.23%,54.5,7.4,176.2,43.1,131.9,6.3,502.9,23.9 +Sunteck Realty Ltd.,SUNTECK,512179,REALTY,REALTY,36.2,39.1,-14.1,-56.70%,2.2,15.8,-20.9,-6.4,-13.9,-1,-46.5,-3.3 +Supreme Industries Ltd.,SUPREMEIND,509930,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,"2,321.4","1,952.5",356.2,15.43%,71.9,1.6,295.4,76.3,243.2,19.1,"1,028.2",80.9 +Suzlon Energy Ltd.,SUZLON,ASM,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,428.7","1,196.4",225,15.83%,51.2,43.7,102.4,0.1,102.3,0.1,561.4,0.4 +Syngene International Ltd.,SYNGENE,539268,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,931.7,656,254.1,27.92%,104.6,13,150.7,34.2,116.5,2.9,498.3,12.4 +TTK Prestige Ltd.,TTKPRESTIG,517506,CONSUMER DURABLES,HOUSEWARE,747.2,648.6,80.8,11.08%,15.9,3.1,79.5,20.5,59.3,4.3,224.3,16.2 +TV18 Broadcast Ltd.,TV18BRDCST,532800,MEDIA,BROADCASTING & 
CABLE TV,"1,989","1,992.2",-198.1,-11.04%,50.1,33.8,-87.1,-6.5,-28.9,-0.2,92.2,0.5 +TVS Motor Company Ltd.,TVSMOTOR,532343,AUTOMOBILES & AUTO COMPONENTS,2/3 WHEELERS,"9,983.8","8,576.9","1,355.9",13.65%,237.1,483.3,686.4,259.8,386.3,8.1,"1,457.6",30.7 +Tata Consultancy Services Ltd.,TCS,532540,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"60,698","43,946","15,746",26.38%,"1,263",159,"15,33","3,95","11,342",31,"44,654",122 +Tata Elxsi Ltd.,TATAELXSI,500408,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,912.8,618.2,263.5,29.89%,25,5.8,263.9,63.8,200,32.1,785.1,126.1 +Tata Consumer Products Ltd.,TATACONSUM,500800,FMCG,PACKAGED FOODS,"3,823.6","3,196.7",537.1,14.38%,93.9,27.6,490.9,131.7,338.2,3.6,"1,275.2",13.7 +Tata Motors Limited (DVR),TATAMTRDVR,570001,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,,,,,,,,,,,, +Tata Motors Ltd.,TATAMOTORS,500570,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,"106,759","91,361.3","13,766.9",13.10%,"6,636.4","2,651.7","5,985.9","2,202.8","3,764",9.8,"15,332.3",40 +Tata Power Company Ltd.,TATAPOWER,500400,UTILITIES,ELECTRIC UTILITIES,"16,029.5","12,647","3,091",19.64%,925.9,"1,181.8",979.2,213.3,875.5,2.7,"3,570.8",11.2 +Tata Steel Ltd.,TATASTEEL,500470,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"55,910.2","51,414.1","4,267.8",7.66%,"2,479.8","1,959.4","-6,842.1",-228,"-6,196.2",-5.1,"-6,081.3",-5 +Tech Mahindra Ltd.,TECHM,532755,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"13,128.1","11,941.1",922.8,7.17%,465.7,97.5,623.8,110,493.9,5.6,"3,600.7",40.9 +The Ramco Cements Ltd.,RAMCOCEM,500260,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,352.1","1,935",405.6,17.33%,162.8,116.5,137.8,37,72,3.1,348.9,14.8 +Thermax Ltd.,THERMAX,500411,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,368.3","2,097.8",204.6,8.89%,33,19.8,217.7,58.9,157.7,14,498.8,44.3 +Timken India Ltd.,TIMKEN,522113,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,692.1,546.5,135.5,19.87%,21.1,0.9,123.6,30.6,93,12.4,358.3,47.6 +Titan Company Ltd.,TITAN,500114,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"12,653","11,118","1,411",11.26%,144,140,"1,251",336,915,10.3,"3,302",37.1 +Torrent Pharmaceuticals Ltd.,TORNTPHARM,500420,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,686","1,835",825,31.02%,201,91,559,173,386,11.4,"1,334",39.4 +Torrent Power Ltd.,TORNTPOWER,532779,UTILITIES,ELECTRIC UTILITIES,"7,069.1","5,739.5","1,221.4",17.55%,341.7,247.2,740.7,198.1,525.9,10.9,"2,176.8",45.3 +Trent Ltd.,TRENT,500251,RETAILING,DEPARTMENT STORES,"3,062.5","2,525.8",456.6,15.31%,152.2,95.5,288.9,86.3,234.7,6.6,629.4,17.7 +Trident Ltd.,TRIDENT,521064,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,812","1,557.3",240.3,13.37%,89.4,35,130.4,40.1,90.7,0.2,458.1,0.9 +UPL Ltd.,UPL,512070,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"10,275","8,807","1,325",13.03%,657,871,-185,-96,-189,-2.5,"1,856",24.7 +UltraTech Cement Ltd.,ULTRACEMCO,532538,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"16,179.3","13,461.2","2,550.9",15.93%,797.8,233.9,"1,686.2",409.4,"1,281.5",44.5,"5,694.1",197.2 +Union Bank of India,UNIONBANK,532477,BANKING AND FINANCE,BANKS,"28,952.5","6,189.3","7,265",29.38%,0,"15,498.2","5,492.3","1,944","3,571.8",5.1,"11,918.9",16.1 +United Breweries Ltd.,UBL,532478,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"1,902.1","1,705.8",184.3,9.75%,50.9,1.4,144,36.9,107.3,4.1,251.3,9.5 +United Spirits Ltd.,MCDOWELL-N,532432,FOOD BEVERAGES & TOBACCO,BREWERIES & DISTILLERIES,"6,776.6","6,269.8",466.7,6.93%,65.3,26.2,446,106.3,339.3,4.8,"1,133",15.6 +V-Guard Industries Ltd.,VGUARD,532953,CONSUMER 
DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,147.9","1,041.3",92.5,8.16%,19.8,9.3,77.5,18.6,59,1.4,215.2,5 +Vardhman Textiles Ltd.,VTL,502986,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,487","2,192.1",205.4,8.57%,103.7,22,169.2,41.7,134.3,4.7,531.9,18.7 +Varun Beverages Ltd.,VBL,540180,FOOD BEVERAGES & TOBACCO,NON-ALCOHOLIC BEVERAGES,"3,889","2,988.4",882.1,22.79%,170.8,62.5,667.3,152.9,501.1,3.9,"1,998.7",15.4 +Vinati Organics Ltd.,VINATIORGA,524200,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,464.4,337.3,110.8,24.73%,13.7,0.3,113,28.9,84.2,8.2,408.2,39.7 +Voltas Ltd.,VOLTAS,500575,CONSUMER DURABLES,CONSUMER ELECTRONICS,"2,363.7","2,222.5",70.3,3.06%,11.7,11.4,118.1,49.3,36.7,1.1,199.5,6 +ZF Commercial Vehicle Control Systems India Ltd.,ZFCVINDIA,533023,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,015.8",846.2,145.5,14.67%,27.1,1.3,141.2,35.5,105.7,55.7,392,206.7 +Welspun Corp Ltd.,WELCORP,ASM,METALS & MINING,IRON & STEEL PRODUCTS,"4,161.4","3,659.9",399.5,9.84%,85.7,75,340.8,79,384.7,14.7,809.2,30.9 +Welspun Living Ltd.,WELSPUNLIV,514162,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,542.4","2,151.1",358,14.27%,98.5,33.8,258.9,58.7,196.7,2,526.1,5.4 +Whirlpool of India Ltd.,WHIRLPOOL,500238,CONSUMER DURABLES,CONSUMER ELECTRONICS,"1,555.5","1,448.4",73.2,4.81%,49.2,5.6,52.3,14.1,36.6,2.9,198.8,15.7 +Wipro Ltd.,WIPRO,507685,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"23,255.7","18,543.2","3,972.7",17.64%,897,303.3,"3,512.2",841.9,"2,646.3",5.1,"11,643.8",22.3 +Zee Entertainment Enterprises Ltd.,ZEEL,505537,MEDIA,BROADCASTING & CABLE TV,"2,509.6","2,105",332.8,13.65%,77.2,23.4,184.2,54.4,123,1.3,-102.2,-1.1 +eClerx Services Ltd.,ECLERX,532927,SOFTWARE & SERVICES,BPO/KPO,735.9,517,204.7,28.37%,30.3,6.1,182.4,46.3,136,28.2,506,105 +Sterlite Technologies Ltd.,STLTECH,532374,TELECOMMUNICATIONS EQUIPMENT,TELECOM CABLES,"1,497","1,281",213,14.26%,85,95,36,12,34,0.9,203,5.1 +HEG Ltd.,HEG,509631,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,642.2,512.3,101.9,16.58%,38.5,8.5,82.9,21.7,96,24.9,439.5,113.9 +SBI Life Insurance Company Ltd.,SBILIFE,540719,BANKING AND FINANCE,LIFE INSURANCE,"28,816.2","28,183.8",609.9,2.12%,0,0,621.5,43.9,380.2,3.8,"1,842.2",18.4 +General Insurance Corporation of India,GICRE,540755,BANKING AND FINANCE,GENERAL INSURANCE,"13,465.9","11,574","1,464.6",11.20%,0,0,"1,855.4",243.7,"1,689",15.2,"6,628",37.8 +Tube Investments of India Ltd.,TIINDIA,540762,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,005.4","1,718.2",251.4,12.76%,34.6,7.7,244.8,63.4,181.4,9.4,717.5,37.1 +Honeywell Automation India Ltd.,HONAUT,517174,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,144.3",965.9,138.3,12.52%,13.8,0.7,163.9,42,121.9,137.8,443.4,503.9 +Indian Energy Exchange Ltd.,IEX,540750,BANKING AND FINANCE,EXCHANGE,133,16.6,92,84.73%,5.1,0.7,110.6,27.9,86.5,1,327.8,3.7 +ICICI Lombard General Insurance Company Ltd.,ICICIGI,540716,BANKING AND FINANCE,GENERAL INSURANCE,"5,271.1","4,612.4",743.5,14.16%,0,0,763.6,186.4,577.3,11.8,"1,757.1",35.8 +Aster DM Healthcare Ltd.,ASTERDM,540975,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"3,325.2","2,939.4",377.3,11.38%,227.2,101.9,2.1,10.2,-30.8,-0.6,284.3,5.7 +Central Depository Services (India) Ltd.,CDSL,CDSL,OTHERS,INVESTMENT COMPANIES,230.1,77.9,129.4,62.40%,6.5,0,145.6,35.8,108.9,10.4,320.2,30.6 +Graphite India Ltd.,GRAPHITE,509488,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,884,823,-30,-3.78%,19,4,992,190,804,41.1,856,43.9 +Grasim Industries Ltd.,GRASIM,500300,CEMENT AND CONSTRUCTION,CEMENT & CEMENT 
PRODUCTS,"30,505.3","25,995.9","4,224.8",13.98%,"1,245.2",397.8,"2,866.4",837.7,"1,163.8",17.7,"6,624.9",100.6 +KNR Constructions Ltd.,KNRCON,532942,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"1,043.8",806.9,231.6,22.30%,39.2,20.6,177.1,34.6,147.4,5.2,537.5,19.1 +Aditya Birla Capital Ltd.,ABCAPITAL,540691,DIVERSIFIED,HOLDING COMPANIES,"7,730.4","4,550.1","2,821.9",36.55%,48,"1,827",956.8,284.1,705,2.7,"5,231.9",20.1 +Dixon Technologies (India) Ltd.,DIXON,540699,CONSUMER DURABLES,CONSUMER ELECTRONICS,"4,943.9","4,744.3",198.9,4.02%,36.4,17.1,146.1,35.2,107.3,19,308.7,51.8 +Cholamandalam Financial Holdings Ltd.,CHOLAHLDNG,504973,DIVERSIFIED,HOLDING COMPANIES,"6,372.2","2,495.1","3,404.8",54.05%,52.1,"2,209.4","1,215.8",324.6,420.9,22.4,"1,532.3",81.6 +Cochin Shipyard Ltd.,COCHINSHIP,540678,TRANSPORTATION,MARINE PORT & SERVICES,"1,100.4",820.5,191.2,18.90%,18.9,9.6,251.4,69.9,181.5,13.8,429.9,32.7 +Bharat Dynamics Ltd.,BDL,541143,GENERAL INDUSTRIALS,DEFENCE,694.1,481.8,134,21.77%,17.4,0.8,194.1,47,147.1,8,425.4,23.2 +Lux Industries Ltd.,LUXIND,539542,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,643.6,584.2,55,8.61%,5.9,5.4,48,12.1,37.1,12.3,103.1,32.9 +Zensar Technologies Ltd.,ZENSARTECH,504067,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,277.1","1,009.9",230.9,18.61%,36.6,5.7,224.9,51,173.9,7.7,525.8,23.2 +PCBL Ltd.,PCBL,506590,CHEMICALS & PETROCHEMICALS,CARBON BLACK,"1,489.4","1,248.6",238.1,16.02%,48.2,21,171.6,48.8,122.6,3.2,431.6,11.4 +Zydus Wellness Ltd.,ZYDUSWELL,531335,FMCG,PACKAGED FOODS,444,423.1,16.8,3.82%,5.8,6.5,8.6,2.7,5.9,0.9,281.2,44.2 +Linde India Ltd.,LINDEINDIA,523457,GENERAL INDUSTRIALS,INDUSTRIAL GASES,729.9,537.7,173.6,24.41%,49.7,1.2,141.3,34.6,108.7,12.8,417.9,49 +FDC Ltd.,FDC,531599,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,513.6,409.9,76.4,15.71%,9.9,1.1,92.7,22.9,69.8,4.2,251.2,15.4 +The New India Assurance Company Ltd.,NIACL,540769,BANKING AND FINANCE,GENERAL INSURANCE,"10,571","10,773.4",-246.5,-2.33%,0,0,-242,-46.7,-176.1,-1.1,947,5.7 +Sundaram Finance Ltd.,SUNDARMFIN,590071,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,710.6",322.5,"1,332.1",77.98%,43.6,820.3,470.6,142.8,365.4,33.2,"1,506.7",135.6 +TeamLease Services Ltd.,TEAMLEASE,539658,COMMERCIAL SERVICES & SUPPLIES,MISC. 
COMMERCIAL SERVICES,"2,285.6","2,240.8",31.8,1.40%,12.9,2.5,29.4,1.8,27.3,16.3,106.6,63.5 +Galaxy Surfactants Ltd.,GALAXYSURF,540935,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,985.8,858.2,124.9,12.70%,24.7,5.4,97.5,20.1,77.4,21.8,349.3,98.5 +Bandhan Bank Ltd.,BANDHANBNK,541153,BANKING AND FINANCE,BANKS,"5,032.2","1,400.2","1,583.4",35.25%,0,"2,048.6",947.2,226.1,721.2,4.5,"2,541.1",15.8 +ICICI Securities Ltd.,ISEC,541179,BANKING AND FINANCE,CAPITAL MARKETS,"1,249",433.5,810.2,64.87%,25.8,215.1,569.4,145.7,423.6,13.1,"1,238.1",38.3 +V-Mart Retail Ltd.,VMART,534976,RETAILING,DEPARTMENT STORES,551.4,548.8,0.7,0.12%,53.2,35.9,-86.4,-22.3,-64.1,-32.4,-103.1,-52.1 +Nippon Life India Asset Management Ltd.,NAM-INDIA,540767,BANKING AND FINANCE,ASSET MANAGEMENT COS.,475.4,156.1,241.4,60.73%,7.2,1.7,310.4,66.1,244.4,3.9,883.3,14.1 +Grindwell Norton Ltd.,GRINDWELL,506076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,690,536,131.4,19.69%,16.9,1.8,135.3,33.1,101.9,9.2,378.3,34.2 +HDFC Life Insurance Company Ltd.,HDFCLIFE,540777,BANKING AND FINANCE,LIFE INSURANCE,"23,276.6","23,659.3",-508.1,-2.20%,0,0,-373.1,-657.5,378.2,1.8,"1,472.8",6.9 +Elgi Equipments Ltd.,ELGIEQUIP,522074,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,817.8,663.4,142.7,17.71%,18.7,6.6,129.2,38.8,91.3,2.9,401.9,12.7 +Hindustan Aeronautics Ltd.,HAL,541154,GENERAL INDUSTRIALS,DEFENCE,"6,105.1","4,108.1","1,527.6",27.11%,349.6,0.3,"1,647",414.8,"1,236.7",18.5,"6,037.3",90.3 +BSE Ltd.,BSE,BSE,BANKING AND FINANCE,EXCHANGE,367,172.8,189.2,52.26%,22.7,8.5,163,63.6,120.5,8.8,706,52.1 +Rites Ltd.,RITES,541556,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,608.8,444.5,137.8,23.67%,14.1,1.4,148.8,40.1,101.2,4.2,488.1,20.3 +Fortis Healthcare Ltd.,FORTIS,532843,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,783.5","1,439.8",330.2,18.65%,84.1,31.8,231.4,48.8,173.7,2.3,547.6,7.3 +Varroc Engineering Ltd.,VARROC,541578,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,893.5","1,692.6",194.3,10.30%,84.9,50.3,65.9,18.2,54.2,3.5,146.5,9.6 +Adani Green Energy Ltd.,ADANIGREEN,ASM,UTILITIES,ELECTRIC UTILITIES,"2,589",521,"1,699",76.53%,474,"1,165",413,119,372,2.2,"1,305",8.2 +VIP Industries Ltd.,VIPIND,507880,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,548.7,493.2,52.9,9.68%,23.8,12.4,19.3,6,13.3,0.9,110.9,7.8 +CreditAccess Grameen Ltd.,CREDITACC,541770,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"1,247.6",248.8,902.3,72.36%,12.3,423.9,466.8,119.7,347,21.8,"1,204.2",75.7 +CESC Ltd.,CESC,500084,UTILITIES,ELECTRIC UTILITIES,"4,414","3,706",646,14.84%,303,305,461,98,348,2.6,"1,447",10.9 +Jamna Auto Industries Ltd.,JAMNAAUTO,520051,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,608.7,528.2,79.1,13.03%,10.9,0.8,68.7,18.6,50.1,2.4,189.3,4.7 +Suprajit Engineering Ltd.,SUPRAJIT,532509,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,727.6,639.1,69.8,9.85%,25.7,13.6,49.2,14.5,34.8,2.5,146.9,10.6 +JK Paper Ltd.,JKPAPER,532162,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,708.8","1,242.8",407.3,24.68%,83.5,42,340.6,34.9,302.4,17.9,"1,220.6",72.1 +Bank of Maharashtra,MAHABANK,532525,BANKING AND FINANCE,BANKS,"5,735.5","1,179.4","1,920.5",37.90%,0,"2,635.7",935.7,16,919.8,1.3,"3,420.8",4.8 +Aavas Financiers Ltd.,AAVAS,541988,BANKING AND FINANCE,HOUSING FINANCE,497.6,123.5,367.8,74.03%,7.6,203.6,157.4,35.7,121.7,15.4,465.4,58.8 +HDFC Asset Management Company Ltd.,HDFCAMC,541729,BANKING AND FINANCE,ASSET MANAGEMENT COS.,765.4,162,481.1,74.81%,13,2.3,588.1,151.6,436.5,20.4,"1,659.3",77.7 +KEI Industries 
Ltd.,KEI,517569,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"1,954.2","1,742.7",203.9,10.47%,15.6,7.5,188.4,48.2,140.2,15.5,528.3,58.5 +Orient Electric Ltd.,ORIENTELEC,541301,CONSUMER DURABLES,CONSUMER ELECTRONICS,570.3,546.2,20.7,3.65%,14.2,5.2,23.4,4.9,18.4,0.9,95.3,4.5 +Deepak Nitrite Ltd.,DEEPAKNTR,506401,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"1,795.1","1,475.8",302.3,17.00%,39.4,2.7,277.2,72.1,205.1,15,797.9,58.5 +Fine Organic Industries Ltd.,FINEORG,541557,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,557.6,409.4,131.1,24.25%,14.4,0.7,133.1,28.9,103.4,33.7,458.8,149.6 +LTIMindtree Ltd.,LTIM,540005,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"9,048.6","7,274.1","1,631.3",18.32%,208.2,47,"1,519.3",357,"1,161.8",39.3,"4,427.5",149.6 +Dalmia Bharat Ltd.,DALBHARAT,542216,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"3,234","2,56",589,18.70%,401,101,172,48,118,6.3,"1,041",54.8 +Godfrey Phillips India Ltd.,GODFRYPHLP,500163,FOOD BEVERAGES & TOBACCO,CIGARETTES-TOBACCO PRODUCTS,"1,412.5","1,151",223.6,16.27%,36.5,6.6,218.5,55.5,202.1,38.9,802.9,154.4 +Vaibhav Global Ltd.,VAIBHAVGBL,532156,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,708.4,641.5,63.5,9.01%,22.6,2.9,41.4,12.4,29.4,1.8,121.3,7.3 +Abbott India Ltd.,ABBOTINDIA,500488,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,549.7","1,113.3",380.9,25.49%,17.8,3.1,415.4,102.5,312.9,147.3,"1,081.4",508.9 +Adani Total Gas Ltd.,ATGL,ASM,UTILITIES,UTILITIES,"1,104.8",815.7,279.9,25.55%,37.6,27.3,224.2,57.2,172.7,1.6,571,5.2 +Nestle India Ltd.,NESTLEIND,500790,FMCG,PACKAGED FOODS,"5,070.1","3,811.9","1,224.9",24.32%,111.2,31.4,"1,222",313.9,908.1,94.2,"2,971.1",308.2 +Bayer Cropscience Ltd.,BAYERCROP,506285,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,"1,633.3","1,312.3",304.9,18.85%,11.6,3.7,305.7,82.8,222.9,49.6,844.4,188.1 +Amber Enterprises India Ltd.,AMBER,540902,CONSUMER DURABLES,CONSUMER ELECTRONICS,939.8,867.5,59.6,6.43%,45.2,36.6,-9.5,-3.8,-6.9,-2.1,156.8,46.5 +Rail Vikas Nigam Ltd.,RVNL,542649,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"5,210.3","4,616",298.3,6.07%,6.2,132.7,455.4,85.2,394.3,1.9,"1,478.8",7.1 +Metropolis Healthcare Ltd.,METROPOLIS,542650,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,309.7,233.7,74.8,24.25%,22.2,5.7,48.1,12.5,35.5,6.9,133.4,26 +Polycab India Ltd.,POLYCAB,542652,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"4,253","3,608.8",608.9,14.44%,60.3,26.8,557.2,127.4,425.6,28.4,"1,607.2",107.1 +Multi Commodity Exchange of India Ltd.,MCX,534091,BANKING AND FINANCE,EXCHANGE,184,193.8,-28.7,-17.38%,6.6,0.1,-16.4,1.6,-19.1,-3.7,44.8,8.8 +IIFL Finance Ltd.,IIFL,532636,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,"2,533.7",788.3,"1,600.8",64.66%,43.3,932.1,683.5,158,474.3,12.4,"1,690.7",44.4 +Ratnamani Metals & Tubes Ltd.,RATNAMANI,520111,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,141.9",886.3,244.9,21.65%,23.6,10.8,221.1,56.8,163.9,23.4,622.6,88.8 +RHI Magnesita India Ltd.,RHIM,534076,GENERAL INDUSTRIALS,OTHER INDUSTRIAL GOODS,989.7,839,147.9,14.98%,44.2,8.5,97.9,26.3,71.3,3.5,-502.2,-24.3 +Birlasoft Ltd.,BSOFT,532400,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,325.4","1,102.7",207.1,15.81%,21.5,5.7,195.5,50.4,145.1,5.2,378.4,13.7 +EIH Ltd.,EIHOTEL,500840,HOTELS RESTAURANTS & TOURISM,HOTELS,552.5,387.6,142.9,26.94%,33.2,5.6,126.1,36.2,93.1,1.5,424.1,6.8 +Affle (India) Ltd.,AFFLE,542752,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,441.2,344.1,87.2,20.22%,18.4,5.5,73.2,6.4,66.8,5,264.3,19.8 +Westlife Foodworld Ltd.,WESTLIFE,505533,HOTELS RESTAURANTS 
& TOURISM,RESTAURANTS,618,516.5,98.2,15.98%,43.9,27.4,30.2,7.8,22.4,1.4,107.7,6.9 +IndiaMART InterMESH Ltd.,INDIAMART,542726,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,329.3,214.7,80,27.15%,8,2.3,104.3,23.9,69.4,11.4,321.1,53.6 +Infosys Ltd.,INFY,500209,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"39,626","29,554","9,44",24.21%,"1,166",138,"8,768","2,553","6,212",15,"24,871",60.1 +Sterling and Wilson Renewable Energy Ltd.,SWSOLAR,542760,COMMERCIAL SERVICES & SUPPLIES,CONSULTING SERVICES,776.7,758,1.5,0.19%,4.3,64.3,-50,4.6,-54.2,-2.9,-668.4,-35.2 +ABB India Ltd.,ABB,500002,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,846","2,330.7",438.5,15.84%,30.3,0.9,484.2,122.2,362.9,17.1,"1,208.7",57 +Poly Medicure Ltd.,POLYMED,531768,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,351.4,253.1,84.2,24.97%,16,2.2,80.9,18.8,62.2,6.5,233.7,24.4 +GMM Pfaudler Ltd.,GMMPFAUDLR,505255,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,946,795.5,142,15.15%,32.2,21.5,96.8,26.5,71.1,15.8,183.2,40.8 +Gujarat Fluorochemicals Ltd.,FLUOROCHEM,542812,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,960.3,783.7,163.1,17.23%,67.5,34.2,74.8,22.1,52.7,4.8,915.2,83.3 +360 One Wam Ltd.,360ONE,542772,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,617.1,235.6,317.8,57.31%,13.7,139.9,226.8,40.8,186,5.2,696.8,19.5 +Tata Communications Ltd.,TATACOMM,500483,TELECOM SERVICES,OTHER TELECOM SERVICES,"4,897.9","3,857.1","1,015.5",20.84%,605.1,137.4,298.3,77.9,220.7,7.7,"1,322.3",46.4 +Alkyl Amines Chemicals Ltd.,ALKYLAMINE,506767,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,354.5,303.9,48.3,13.71%,12.5,1.7,36.4,9.2,27.2,5.3,171.3,33.5 +CSB Bank Ltd.,CSBBANK,542867,BANKING AND FINANCE,BANKS,835.8,317.5,174.6,25.41%,0,343.6,178,44.8,133.2,7.7,577.7,33.3 +Indian Railway Catering & Tourism Corporation Ltd.,IRCTC,542830,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,"1,042.4",628.8,366.6,36.83%,14,4.4,395.2,100.5,294.7,3.7,"1,061.2",13.3 +Sumitomo Chemical India Ltd.,SUMICHEM,542920,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,928,715.5,187.9,20.80%,15.8,1.2,195.5,52,143.4,2.9,367.7,7.4 +Century Textiles & Industries Ltd.,CENTURYTEX,500040,COMMERCIAL SERVICES & SUPPLIES,PAPER & PAPER PRODUCTS,"1,114.9","1,069.2",33.8,3.07%,59.2,17,-30.5,-3.3,-30.4,-2.8,117.7,10.5 +SBI Cards and Payment Services Ltd.,SBICARD,543066,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"4,221.4","2,018.8","1,327",32.47%,46.8,604.9,809.4,206.4,603,6.4,"2,302.2",24.3 +Hitachi Energy India Ltd.,POWERINDIA,543187,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"1,228.2","1,162.6",65.3,5.32%,22.5,10.7,32.4,7.6,24.7,5.8,82.5,19.5 +Suven Pharmaceuticals Ltd.,SUVENPHAR,543064,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,250.9,133.1,98,42.40%,11.9,0.5,105.4,25.8,79.6,3.1,431.8,17 +Tata Chemicals Ltd.,TATACHEM,500770,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"4,083","3,179",819,20.49%,234,145,627,120,428,16.8,"2,06",80.8 +Aarti Drugs Ltd.,AARTIDRUGS,524348,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,642.2,565.1,76.4,11.92%,12.6,8.2,56.3,16.7,39.6,4.3,180.2,19.6 +Gujarat Ambuja Exports Ltd.,GAEL,524226,FMCG,EDIBLE OILS,"1,157.7","1,012.2",103.3,9.26%,30.5,5.9,109.1,26.3,82.8,3.6,305.1,13.3 +Polyplex Corporation Ltd.,POLYPLEX,524051,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,595.7","1,451.5",120.6,7.67%,75.1,9.9,59.1,10.9,27.9,8.9,71.1,22.6 +Chalet Hotels Ltd.,CHALET,542399,HOTELS RESTAURANTS & TOURISM,HOTELS,318.2,188.6,126,40.04%,35,50.1,44.5,8,36.4,1.8,266.7,13 +Adani Enterprises Ltd.,ADANIENT,512599,COMMERCIAL SERVICES & SUPPLIES,COMMODITY 
TRADING & DISTRIBUTION,"23,066","20,087.2","2,430.1",10.79%,757,"1,342.8",791,397.8,227.8,2,"2,444.3",21.4 +YES Bank Ltd.,YESBANK,532648,BANKING AND FINANCE,BANKS,"7,980.6","2,377.1",810,12.06%,0,"4,793.6",304.4,75.7,228.6,0.1,836.6,0.3 +EPL Ltd.,EPL,500135,COMMERCIAL SERVICES & SUPPLIES,CONTAINERS & PACKAGING,"1,011.2",820.6,181,18.07%,83.6,30.6,76.4,25.4,50.5,1.6,251.9,7.9 +Network18 Media & Investments Ltd.,NETWORK18,532798,MEDIA,BROADCASTING & CABLE TV,"2,052.2","2,083.8",-218.3,-11.70%,56.8,66.2,-154.5,-6.5,-61,-0.6,-144.2,-1.4 +CIE Automotive India Ltd.,CIEINDIA,532756,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,299.4","1,934",345.4,15.15%,78.3,31,256.1,69.1,375.4,9.9,298.4,7.9 +Vedanta Ltd.,VEDL,500295,METALS & MINING,ALUMINIUM AND ALUMINIUM PRODUCTS,"39,585","27,466","11,479",29.47%,"2,642","2,523","8,177","9,092","-1,783",-4.8,"5,202",14 +Rossari Biotech Ltd.,ROSSARI,543213,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,484.8,419.9,63.6,13.15%,15.1,5,44.8,11.9,32.9,6,116.8,21.2 +KPIT Technologies Ltd.,KPITTECH,542651,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,"1,208.6",959.2,239.9,20.01%,48.1,13.6,187.7,46.3,140.9,5.2,486.9,18 +Intellect Design Arena Ltd.,INTELLECT,538835,SOFTWARE & SERVICES,IT SOFTWARE PRODUCTS,631.7,497.2,121.9,19.69%,33.7,0.8,96.5,25.7,70.4,5.2,316.6,23.2 +Balaji Amines Ltd.,BALAMINES,530999,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,387.3,326.8,53.8,14.13%,10.8,1.8,48,11.6,34.7,10.7,197.3,60.9 +UTI Asset Management Company Ltd.,UTIAMC,543238,BANKING AND FINANCE,ASSET MANAGEMENT COS.,405.6,172.5,231.5,57.30%,10.4,2.8,219.8,37,182.8,14.4,562.9,44.3 +Mazagon Dock Shipbuilders Ltd.,MAZDOCK,543237,TRANSPORTATION,SHIPPING,"2,079.2","1,651.1",176.6,9.66%,20.2,1.3,406.6,102.8,332.9,16.5,"1,327.6",65.8 +Computer Age Management Services Ltd.,CAMS,543232,BANKING AND FINANCE,CAPITAL MARKETS,284.7,153,122.1,44.39%,17.4,2,112.4,28.6,84.5,17.2,309.2,62.9 +Happiest Minds Technologies Ltd.,HAPPSTMNDS,543227,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,428.8,324,82.6,20.32%,14.6,11.2,79.1,20.7,58.5,3.9,232,15.6 +Triveni Turbine Ltd.,TRITURBINE,533655,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,402.3,313.4,74.3,19.17%,5.1,0.6,83.2,19,64.2,2,233.1,7.3 +Angel One Ltd.,ANGELONE,ASM,BANKING AND FINANCE,CAPITAL MARKETS,"1,049.3",602.6,443.4,42.31%,11.2,26.4,407.2,102.7,304.5,36.3,"1,020.2",121.7 +Tanla Platforms Ltd.,TANLA,532790,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,014.9",811.8,196.8,19.51%,22.6,1.8,178.7,36.2,142.5,10.6,514.7,38.3 +Max Healthcare Institute Ltd.,MAXHEALTH,543220,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,"1,408.6",975.8,387.4,28.42%,57.9,8.5,366.4,89.7,276.7,2.9,990.1,10.2 +Asahi India Glass Ltd.,ASAHIINDIA,515030,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,122.6",934,185.6,16.58%,43,34.4,111.3,30.2,86.9,3.6,343.5,14.1 +Prince Pipes & Fittings Ltd.,PRINCEPIPE,542907,GENERAL INDUSTRIALS,PLASTIC PRODUCTS,660.4,562.3,94.2,14.35%,22.5,0.7,92.8,22.2,70.6,5.2,219.8,19.9 +Route Mobile Ltd.,ROUTE,543228,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,018.3",886.5,128.1,12.63%,21.4,6.5,103.8,15.5,88.8,14.2,365.3,58.3 +KPR Mill Ltd.,KPRMILL,532889,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,533","1,212.9",298,19.72%,46,18.1,256,54.2,201.8,5.9,788.8,23.1 +Infibeam Avenues Ltd.,INFIBEAM,539807,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,792.6,719.7,70.2,8.89%,17.1,0.5,55.2,14.7,41,0.1,142.2,0.5 +Restaurant Brands Asia Ltd.,RBA,543248,HOTELS RESTAURANTS & 
TOURISM,RESTAURANTS,628.2,568.7,56.2,9.00%,78.6,31.5,-50.7,0,-46,-0.9,-220.3,-4.5 +Larsen & Toubro Ltd.,LT,500510,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"52,157","45,392.1","5,632",11.04%,909.9,864,"4,991.1","1,135.5","3,222.6",22.9,"12,255.3",89.2 +Gland Pharma Ltd.,GLAND,543245,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,426.6","1,049.3",324.1,23.60%,81.3,6,289.9,95.8,194.1,11.8,698.8,42.4 +Macrotech Developers Ltd.,LODHA,543287,REALTY,REALTY,"1,755.1","1,333.5",416.1,23.78%,29.3,123.1,269.2,62.4,201.9,2.1,"1,529.2",15.9 +Poonawalla Fincorp Ltd.,POONAWALLA,524000,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),745.3,178.9,531.7,71.98%,14.7,215.5,"1,124.6",270,860.2,11.2,"1,466.4",19.1 +The Fertilisers and Chemicals Travancore Ltd.,FACT,590024,FERTILIZERS,FERTILIZERS,"1,713.6","1,530.8",132.4,7.96%,5.3,61.2,105.2,0,105.2,1.6,508.4,7.9 +Home First Finance Company India Ltd.,HOMEFIRST,543259,BANKING AND FINANCE,HOUSING FINANCE,278,53.7,211.6,77.43%,2.8,117,96.4,22.1,74.3,8.4,266.2,30.2 +CG Power and Industrial Solutions Ltd.,CGPOWER,500093,GENERAL INDUSTRIALS,HEAVY ELECTRICAL EQUIPMENT,"2,019","1,692.9",308.6,15.42%,22.9,0.4,329.9,86.2,242.3,1.6,"1,1",7.2 +Laxmi Organic Industries Ltd.,LXCHEM,543277,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,660.5,613.3,38.9,5.97%,27.5,2.1,17.5,6.8,10.7,0.4,100.6,3.8 +Anupam Rasayan India Ltd.,ANURAS,543275,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,395.6,284.7,107.5,27.41%,19.8,20.4,70.7,22,40.7,3.8,178.9,16.6 +Kalyan Jewellers India Ltd.,KALYANKJIL,ASM,TEXTILES APPARELS & ACCESSORIES,GEMS & JEWELLERY,"4,427.7","4,100.9",313.7,7.11%,66.9,81.7,178.1,43.3,135.2,1.3,497.9,4.8 +Jubilant Pharmova Ltd.,JUBLPHARMA,530019,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,690.2","1,438.5",241.8,14.39%,96.6,66.1,89,35.9,62.5,3.9,-44.6,-2.8 +Indigo Paints Ltd.,INDIGOPNTS,543258,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,273.4,228.7,41.8,15.45%,10,0.5,34.3,8.2,26.1,5.5,132.4,27.8 +Indian Railway Finance Corporation Ltd.,IRFC,543257,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),"6,767.5",33.5,"6,732.4",99.50%,2.1,"5,181.5","1,549.9",0,"1,549.9",1.2,"6,067.6",4.6 +Mastek Ltd.,MASTEK,523704,SOFTWARE & SERVICES,IT CONSULTING & SOFTWARE,770.4,642.5,123,16.07%,20.9,12.6,90.3,25,62.8,20.5,269.7,88 +Equitas Small Finance Bank Ltd.,EQUITASBNK,543243,BANKING AND FINANCE,BANKS,"1,540.4",616.8,330.2,24.30%,0,593.4,267,68.9,198.1,1.8,749.5,6.7 +Tata Teleservices (Maharashtra) Ltd.,TTML,532371,TELECOM SERVICES,TELECOM SERVICES,288.6,159.3,127.5,44.45%,36.3,403.2,-310.2,0,-310.2,-1.6,"-1,168.3",-6 +Praj Industries Ltd.,PRAJIND,522205,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,893.3,798.4,84,9.52%,9.1,1,84.8,22.4,62.4,3.4,271.4,14.8 +Nazara Technologies Ltd.,NAZARA,543280,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,309.5,269.4,26.7,8.98%,15.1,2.7,21.2,-1.3,19.8,3,60,9.1 +Jubilant Ingrevia Ltd.,JUBLINGREA,543271,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,028.5",902.3,117.7,11.54%,33.9,12.5,79.8,22.4,57.5,3.6,258.9,16.4 +Sona BLW Precision Forgings Ltd.,SONACOMS,543300,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,796.9,567.5,223.3,28.24%,53.4,6,164.1,40.1,123.8,2.1,462.8,7.9 +Chemplast Sanmar Ltd.,CHEMPLASTS,543336,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,025",941.8,46,4.65%,35.3,38.6,9.2,-16.8,26.1,1.6,35.3,2.2 +Aptus Value Housing Finance India Ltd.,APTUS,543335,BANKING AND FINANCE,HOUSING FINANCE,344.5,50.6,277.5,83.18%,2.6,96.1,189.6,41.5,148,3,551.1,11.1 +Clean Science & Technology Ltd.,CLEAN,543318,CHEMICALS & 
PETROCHEMICALS,SPECIALTY CHEMICALS,187.1,106.3,74.8,41.32%,11.1,0.3,69.5,17.3,52.2,4.9,275.5,25.9 +Medplus Health Services Ltd.,MEDPLUS,543427,HEALTHCARE EQUIPMENT & SUPPLIES,HEALTHCARE SUPPLIES,"1,419","1,323.5",85.1,6.04%,55.5,23.5,16.4,1.9,14.6,1.2,58.3,4.9 +Nuvoco Vistas Corporation Ltd.,NUVOCO,543334,CEMENT AND CONSTRUCTION,CEMENT & CEMENT PRODUCTS,"2,578.9","2,243",329.9,12.82%,225.6,139.9,-29.6,-31.1,1.5,0,141.8,4 +Star Health and Allied Insurance Company Ltd.,STARHEALTH,543412,BANKING AND FINANCE,GENERAL INSURANCE,"3,463.2","3,295.8",165.7,4.79%,0,0,167.1,41.8,125.3,2.1,725.4,12.4 +Go Fashion (India) Ltd.,GOCOLORS,543401,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,192.8,132.2,56.6,29.98%,25.8,8.9,25.8,5.7,20,3.7,85.4,15.8 +PB Fintech Ltd.,POLICYBZR,543390,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,909.1,900.7,-89.1,-10.98%,22.3,7.2,-21.1,-0.3,-20.2,-0.5,-127.9,-2.8 +FSN E-Commerce Ventures Ltd.,NYKAA,543384,SOFTWARE & SERVICES,INTERNET & CATALOGUE RETAIL,"1,515.6","1,426.4",80.6,5.35%,54.6,21.3,13.3,4,5.8,0,19.8,0.1 +Krishna Institute of Medical Sciences Ltd.,KIMS,543308,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,655.4,475.2,177.3,27.17%,32.6,8.9,138.6,37.3,92,11.5,342.1,42.7 +Zomato Ltd.,ZOMATO,543320,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"3,06","2,895",-47,-1.65%,128,16,21,-15,36,0,-496.8,-0.6 +Brightcom Group Ltd.,BCG,532368,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"1,690.5","1,172.3",518,30.65%,72.3,0.1,445.8,124.3,321.5,1.6,"1,415.2",7 +Shyam Metalics and Energy Ltd.,SHYAMMETL,543299,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"2,978.9","2,633.6",307.1,10.44%,176.5,35.4,133.4,-348.6,484.1,18.9,"1,049.9",41.2 +G R Infraprojects Ltd.,GRINFRA,543317,CEMENT AND CONSTRUCTION,ROADS & HIGHWAYS,"1,909.2","1,415.7",467.1,24.81%,61.7,144.6,287.1,69.9,217.2,22.5,"1,240.3",128.3 +RattanIndia Enterprises Ltd.,RTNINDIA,534597,UTILITIES,ELECTRIC UTILITIES,"1,618.1","1,392.8",1.5,0.11%,4.3,28.8,142.2,1.7,140.9,1,147.6,1.1 +Borosil Renewables Ltd.,BORORENEW,502219,CONSUMER DURABLES,HOUSEWARE,406.3,369.2,32.5,8.09%,31,9.6,28.9,-1.1,25.1,1.9,32.1,2.5 +HLE Glascoat Ltd.,HLEGLAS,522215,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,227.8,198,26.5,11.79%,6.1,5.8,16.1,5.3,10,1.6,54.4,8 +Tata Investment Corporation Ltd.,TATAINVEST,501301,DIVERSIFIED,HOLDING COMPANIES,125,10.1,113.8,91.88%,0.2,4.7,110.1,-1.3,124.4,24.6,326.1,64.4 +Sapphire Foods India Ltd.,SAPPHIRE,543397,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,650.1,527.5,115.1,17.91%,76.8,24.5,21.4,6.2,15.3,2.4,208.5,32.7 +Devyani International Ltd.,DEVYANI,543330,HOTELS RESTAURANTS & TOURISM,RESTAURANTS,826,665,154.4,18.84%,86.3,41.7,19,-16.8,33.4,0.3,177.5,1.5 +Vijaya Diagnostic Centre Ltd.,VIJAYA,543350,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE SERVICES,145.6,81.5,57.4,41.31%,13.7,5.9,44.6,11,33.3,3.3,103.4,10.1 +C.E. 
Info Systems Ltd.,MAPMYINDIA,543425,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,99.3,50.1,41,44.98%,3.7,0.7,44.7,11.1,33,6.1,122.9,22.7 +Latent View Analytics Ltd.,LATENTVIEW,543398,SOFTWARE & SERVICES,DATA PROCESSING SERVICES,172.7,124.9,30.8,19.78%,2.3,0.8,44.7,10.6,34,1.7,153.6,7.5 +Metro Brands Ltd.,METROBRAND,543426,RETAILING,FOOTWEAR,571.9,400.3,155.4,27.96%,57.2,19.7,94.7,27.5,66.7,2.5,340,12.5 +Easy Trip Planners Ltd.,EASEMYTRIP,543272,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,144.6,76.9,64.8,45.71%,1,2,64.7,17.7,47.2,0.3,146,0.8 +Shree Renuka Sugars Ltd.,RENUKA,532670,FOOD BEVERAGES & TOBACCO,SUGAR,"2,564.7","2,491",63.7,2.49%,64.1,216.8,-207.2,-1.6,-204.9,-1,-286,-1.3 +One97 Communications Ltd.,PAYTM,543396,SOFTWARE & SERVICES,INTERNET SOFTWARE & SERVICES,"2,662.5","2,749.6",-231,-9.17%,180.1,7,-279.9,12.7,-290.5,-5,"-1,207.9",-19 +MTAR Technologies Ltd.,MTARTECH,543270,GENERAL INDUSTRIALS,DEFENCE,167.7,130.7,36.1,21.64%,5.8,5.5,25.7,5.2,20.5,6.7,103.3,33.6 +Capri Global Capital Ltd.,CGCL,531595,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),557.4,229.3,304.8,54.70%,23.1,195.8,86,20.8,65.2,3.2,231.2,11.2 +GMR Airports Infrastructure Ltd.,GMRINFRA,ASM,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"2,185","1,336.8",726.7,35.22%,373,695.8,-252,54.9,-91,-0.1,-370.9,-0.6 +Triveni Engineering & Industries Ltd.,TRIVENI,532356,FOOD BEVERAGES & TOBACCO,SUGAR,"1,629.7","1,554.5",62.9,3.89%,25.8,10.2,39.3,10.1,29.1,1.3,434.3,19.8 +Delhivery Ltd.,DELHIVERY,543529,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"2,043","1,957.3",-15.6,-0.80%,171.2,19.6,-105.2,-2.1,-102.9,-1.4,-546.7,-7.5 +Life Insurance Corporation of India,LICI,543526,BANKING AND FINANCE,LIFE INSURANCE,"202,394.9","193,612.5","8,445",4.18%,0,0,"8,696.5","1,083.9","8,030.3",12.7,"37,204.8",58.8 +Campus Activewear Ltd.,CAMPUS,543523,RETAILING,FOOTWEAR,259.1,234.2,24.5,9.46%,18.1,6.5,0.4,0.1,0.3,0,103.1,3.4 +Motherson Sumi Wiring India Ltd.,MSUMI,543498,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"2,110.2","1,856.5",248.1,11.79%,36.4,7.4,210,54.1,155.9,0.3,523.6,1.2 +Olectra Greentech Ltd.,OLECTRA,532439,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,310.3,266.6,40.5,13.20%,8.8,9.7,25.2,8,18.6,2.2,78.5,9.6 +Patanjali Foods Ltd.,PATANJALI,500368,FMCG,EDIBLE OILS,"7,845.8","7,426.6",395.3,5.05%,60.1,24,335.1,80.5,254.5,7,875.2,24.2 +Raymond Ltd.,RAYMOND,500330,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"2,320.7","1,938.8",314.6,13.96%,65.4,89.3,204.2,50.7,159.8,24,"1,514.2",227.5 +Swan Energy Ltd.,SWANENERGY,503310,REALTY,REALTY,"1,230.1",966.3,257,21.01%,27.1,58.3,178.4,12.8,84.6,6.7,308.4,11.7 +Samvardhana Motherson International Ltd.,MOTHERSON,517334,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"23,639.2","21,585","1,888.8",8.05%,867.4,487.9,449.5,229.2,201.6,0.3,"1,910.3",2.8 +Vedant Fashions Ltd.,MANYAVAR,543463,RETAILING,SPECIALTY RETAIL,233.4,125.5,92.8,42.51%,32.5,10.7,64.8,16.1,48.7,2,399.9,16.5 +Adani Wilmar Ltd.,AWL,543458,FMCG,EDIBLE OILS,"12,331.2","12,123.5",143.7,1.17%,95.7,220.2,-161.8,-31.5,-130.7,-1,130.1,1 +Mahindra Lifespace Developers Ltd.,MAHLIFE,532313,REALTY,REALTY,25.7,52.7,-34.9,-196.45%,3.1,0.2,-30.3,-10.8,-18.9,-1.2,10.5,0.7 +Tejas Networks Ltd.,TEJASNET,540595,TELECOM SERVICES,OTHER TELECOM SERVICES,413.9,383,13,3.28%,41.7,7,-17.7,-5.1,-12.6,-0.7,-61.3,-3.5 +Aether Industries Ltd.,AETHER,543534,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,178.3,118.2,46,28.00%,9.7,1.6,48.7,12.1,36.7,2.8,139.1,10.5 +JBM Auto Ltd.,JBMA,ASM,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & 
EQUIPMENT,"1,238.8","1,091.3",139.7,11.35%,41.2,47.9,58.3,11.3,44.2,3.7,136.8,11.6 +Deepak Fertilisers & Petrochemicals Corporation Ltd.,DEEPAKFERT,500645,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,"2,443.2","2,138.1",286.1,11.80%,81.2,107.1,116.8,53.3,60.1,4.8,674.5,53.4 +Sharda Cropchem Ltd.,SHARDACROP,538666,CHEMICALS & PETROCHEMICALS,AGROCHEMICALS,604.3,559.6,21.2,3.65%,74,4.6,-33.8,-6.3,-27.6,-3.1,191,21.2 +Shoppers Stop Ltd.,SHOPERSTOP,532638,RETAILING,DEPARTMENT STORES,"1,049.7",878.2,160.9,15.49%,108.2,54.9,3.5,0.8,2.7,0.2,94.2,8.6 +BEML Ltd.,BEML,500048,AUTOMOBILES & AUTO COMPONENTS,COMMERCIAL VEHICLES,924,855.3,61.5,6.70%,15.8,10.8,42.2,-9.6,51.8,12.4,200.8,48.2 +Lemon Tree Hotels Ltd.,LEMONTREE,541233,HOTELS RESTAURANTS & TOURISM,HOTELS,230.1,125.3,101.9,44.84%,22.6,47.3,34.8,8.6,22.6,0.3,130.1,1.6 +Rainbow Childrens Medicare Ltd.,RAINBOW,543524,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,340.5,215.1,117.6,35.34%,26.8,13.3,85.2,22.1,62.9,6.2,215.4,21.2 +UCO Bank,UCOBANK,532505,BANKING AND FINANCE,BANKS,"5,865.6","1,581.5",981.9,18.81%,0,"3,302.3",639.8,238.1,403.5,0.3,"1,84",1.5 +Piramal Pharma Ltd.,PPLPHARMA,543635,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"1,960.6","1,645.7",265.6,13.90%,184.5,109.9,20.4,34.5,5,0,-133.6,-1 +KSB Ltd.,KSB,500249,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,572.2,493.4,70.3,12.47%,12.3,2,64.5,17.1,50.1,14.4,209.7,60.3 +Data Patterns (India) Ltd.,DATAPATTNS,543428,GENERAL INDUSTRIALS,DEFENCE,119.2,67.5,40.8,37.63%,3.1,2.3,46.3,12.5,33.8,6,148.3,26.5 +Global Health Ltd.,MEDANTA,543654,DIVERSIFIED CONSUMER SERVICES,HEALTHCARE FACILITIES,864.7,631.1,212.9,25.22%,42.9,20.1,170.6,45.4,125.2,4.7,408.9,15.2 +Aarti Industries Ltd.,AARTIIND,524208,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,"1,454","1,221.2",232.8,16.01%,93,58.2,81.6,-9.1,90.7,2.5,446.2,12.3 +BLS International Services Ltd.,BLS,540073,DIVERSIFIED CONSUMER SERVICES,TRAVEL SUPPORT SERVICES,416.4,321,86.7,21.27%,7.3,1,87.2,5.2,78.7,1.9,267.6,6.5 +Archean Chemical Industries Ltd.,ACI,543657,CHEMICALS & PETROCHEMICALS,COMMODITY CHEMICALS,301.7,195,95.5,32.86%,17.5,1.9,87.3,21.3,66,5.4,394.4,32.1 +Adani Power Ltd.,ADANIPOWER,ASM,UTILITIES,ELECTRIC UTILITIES,"14,935.7","7,819.2","5,171.4",39.81%,"1,004.5",888.4,"5,223.6","-1,370.6","6,594.2",16.5,"20,604.8",53.4 +Craftsman Automation Ltd.,CRAFTSMAN,543276,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,183.8",941.6,237.5,20.14%,66.8,41.6,133.8,29.6,94.5,44.1,298.3,141.2 +NMDC Ltd.,NMDC,526371,METALS & MINING,MINING,"4,335","2,823.6","1,190.4",29.66%,88.8,18.6,"1,404.1",379,"1,026.2",3.5,"5,862.2",20 +Epigral Ltd.,EPIGRAL,543332,CHEMICALS & PETROCHEMICALS,SPECIALTY CHEMICALS,479.1,370.2,107.9,22.57%,31.5,21.3,56.1,17.9,38,9.1,223.4,53.8 +Apar Industries Ltd.,APARINDS,532259,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,"3,944.7","3,576.2",349.8,8.91%,28.2,103.1,237.3,62.9,173.9,45.4,783.9,204.8 +Bikaji Foods International Ltd.,BIKAJI,543653,FMCG,PACKAGED FOODS,614.7,521,87.7,14.41%,15.6,2.9,75.2,15.4,61.2,2.5,173.6,6.9 +Five-Star Business Finance Ltd.,FIVESTAR,543663,BANKING AND FINANCE,FINANCE (INCLUDING NBFCS),522.4,133.2,375,72.28%,5.7,105.9,267,67.6,199.4,6.8,703,24.1 +Ingersoll-Rand (India) Ltd.,INGERRAND,500210,GENERAL INDUSTRIALS,INDUSTRIAL MACHINERY,282.8,210.7,65.7,23.76%,4.6,0.6,67,17.2,49.7,15.8,218.5,69.2 +KFIN Technologies Ltd.,KFINTECH,543720,BANKING AND FINANCE,OTHER FINANCIAL SERVICES,215.3,115.3,93.7,44.82%,12.6,3.2,84.2,22.3,61.4,3.6,215.1,12.6 +Piramal Enterprises Ltd.,PEL,500302,BANKING AND 
FINANCE,FINANCE (INCLUDING NBFCS),"2,205.2","1,320.1","1,117.9",50.97%,38.3,"1,038.9",-11.8,10.7,48.2,2,"3,906.5",173.9 +NMDC Steel Ltd.,NSLNISP,543768,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,290.3,349.6,-72.2,-26.04%,74.5,40.8,-174.7,-43.6,-131.1,-0.5,, +Eris Lifesciences Ltd.,ERIS,540596,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,508.8,324.2,181.1,35.85%,42.1,16.3,126.2,3.9,123.4,9.1,385.6,28.3 +Mankind Pharma Ltd.,MANKIND,543904,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,"2,768.1","2,025.5",682.6,25.21%,96.5,8.6,637.5,129.8,501,12.5,"1,564.8",39.1 +Kaynes Technology India Ltd.,KAYNES,ASM,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,369.8,312.1,48.8,13.52%,6.5,11.8,39.4,7.1,32.3,5.5,143.2,24.6 +Safari Industries (India) Ltd.,SAFARI,523025,TEXTILES APPARELS & ACCESSORIES,OTHER APPARELS & ACCESSORIES,372.9,306.6,63.5,17.15%,12.2,2.2,51.9,12.1,39.8,16.7,162.3,68.2 +Saregama India Ltd.,SAREGAMA,532163,MEDIA,MOVIES & ENTERTAINMENT,185.6,111.5,60.9,35.32%,8.2,0.2,65.6,17.6,48.1,2.5,193.4,10 +Syrma SGS Technology Ltd.,SYRMA,543573,CONSUMER DURABLES,OTHER ELECTRICAL EQUIPMENT/PRODUCTS,720.6,662.7,49,6.88%,11.6,8,37,6.4,28.3,1.6,132.4,7.5 +Jindal Saw Ltd.,JINDALSAW,ASM,GENERAL INDUSTRIALS,OTHER INDUSTRIAL PRODUCTS,"5,488.9","4,662",804.2,14.71%,142.5,188.7,495.6,139.6,375.7,11.8,"1,135.8",35.5 +Godawari Power & Ispat Ltd.,GPIL,532734,METALS & MINING,IRON & STEEL/INTERM.PRODUCTS,"1,314.2",929.6,361.4,28.00%,34.8,10.2,339.6,86.1,256.9,20.6,785.5,63 +Gillette India Ltd.,GILLETTE,507815,FMCG,PERSONAL PRODUCTS,676.2,530.8,136.7,20.48%,20.1,0.1,125.2,32.5,92.7,28.4,361.6,111 +Symphony Ltd.,SYMPHONY,517385,CONSUMER DURABLES,CONSUMER ELECTRONICS,286,234,41,14.91%,7,2,43,8,35,5.1,114,16.5 +Glenmark Life Sciences Ltd.,GLS,543322,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,600.7,428.3,167.1,28.06%,13.1,0.4,158.9,40.2,118.7,9.7,505.5,41.3 +Usha Martin Ltd.,USHAMART,517146,METALS & MINING,IRON & STEEL PRODUCTS,806,640.4,144.3,18.39%,18,6.4,141.2,35,109.5,3.6,399.4,13.1 +Ircon International Ltd.,IRCON,541956,CEMENT AND CONSTRUCTION,CONSTRUCTION & ENGINEERING,"3,136.3","2,771.2",215.7,7.22%,27.1,36.9,301.2,77.6,250.7,2.7,884.6,9.4 +Ujjivan Small Finance Bank Ltd.,UJJIVANSFB,542904,BANKING AND FINANCE,BANKS,"1,579.8",528.6,483.4,34.75%,0,567.8,436.4,108.7,327.7,1.7,"1,254.5",6.4 +Procter & Gamble Health Ltd.,PGHL,500126,PHARMACEUTICALS & BIOTECHNOLOGY,PHARMACEUTICALS,311,216.3,88.7,29.08%,6.5,0.2,88,22.5,65.6,39.5,231.4,139.4 +Allcargo Logistics Ltd.,ALLCARGO,532749,TRANSPORTATION,TRANSPORTATION - LOGISTICS,"3,336.3","3,188.8",118,3.57%,106.7,36.7,14.2,1.3,21.8,0.9,361.9,14.7 +Sheela Foam Ltd.,SFL,540203,DIVERSIFIED CONSUMER SERVICES,FURNITURE-FURNISHING-PAINTS,637.6,547,66.2,10.80%,21.9,8.6,60.2,15.6,44,4.5,192.4,17.7 +Alok Industries Ltd.,ALOKINDS,521070,TEXTILES APPARELS & ACCESSORIES,TEXTILES,"1,369.3","1,323.1",35.9,2.64%,78.6,142.2,-174.6,0,-174.8,-0.3,-948.4,-1.9 +Minda Corporation Ltd.,MINDACORP,538962,AUTOMOBILES & AUTO COMPONENTS,AUTO PARTS & EQUIPMENT,"1,197.9","1,064.5",131.3,10.98%,41.4,14.9,77,18.7,58.8,2.5,278.2,11.6 +Concord Biotech Ltd.,CONCORDBIO,543960,PHARMACEUTICALS & BIOTECHNOLOGY,BIOTECHNOLOGY,270.5,143.2,119.2,45.43%,13.3,0.8,113.2,28.7,81,7.7,, \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py similarity index 62% rename from sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py 
rename to sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index 409846279762..87bd6ad44094 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -4,14 +4,14 @@ # ------------------------------------ """ -FILE: sample_agents_code_interpreter_attachment.py +FILE: sample_agents_code_interpreter.py DESCRIPTION: This sample demonstrates how to use agent operations with code interpreter from the Azure Agents service using a synchronous client. USAGE: - python sample_agents_code_interpreter_attachment.py + python sample_agents_code_interpreter.py Before running the sample: @@ -25,9 +25,9 @@ from azure.ai.projects import AIProjectClient from azure.ai.projects.models import CodeInterpreterTool from azure.ai.projects.models import FilePurpose -from azure.ai.projects.models import MessageAttachment +from azure.ai.projects.models import MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation from azure.identity import DefaultAzureCredential - +from pathlib import Path # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" @@ -38,11 +38,12 @@ ) with project_client: + # upload a file and wait for it to be processed - file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) + file = project_client.agents.upload_file_and_poll(file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") - code_interpreter = CodeInterpreterTool() + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment agent = project_client.agents.create_agent( @@ -50,16 +51,16 @@ name="my-assistant", instructions="You are helpful assistant", tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, ) print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - # create a message with the attachment - attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions) + # create a message message = project_client.agents.create_message( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?" 
) print(f"Created message, message ID: {message.id}") @@ -73,8 +74,25 @@ project_client.agents.delete_file(file.id) print("Deleted file") - project_client.agents.delete_agent(agent.id) - print("Deleted agent") - - messages = project_client.agents.list_messages(thread_id=thread.id) + messages = project_client.agents.get_messages(thread_id=thread.id) print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_sender("assistant") + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name="image_file.png") + print(f"Saved image file to: {Path.cwd() / 'image_file.png'}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index 16d65bec0e2e..d929dfd05afc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -56,7 +56,7 @@ tools=file_search.definitions, tool_resources=file_search.resources, ) - print(f"Created agent, agent ID: {agent.id}") + print(f"Created agent, ID: {agent.id}") # Create thread for communication thread = project_client.agents.create_thread() From b5744a7ce579869a57ac52aad92a3bc0be09172b Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Tue, 29 Oct 2024 20:16:54 -0700 Subject: [PATCH 060/138] Update README.md (#38173) --- sdk/ai/azure-ai-projects/tests/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md index b33de5300a94..704a2cd723ea 100644 --- a/sdk/ai/azure-ai-projects/tests/README.md +++ b/sdk/ai/azure-ai-projects/tests/README.md @@ -19,7 +19,7 @@ The instructions below are for running tests locally, on a Windows machine, agai ``` - Then install the resulting local wheel (update version `1.0.0b1` to the current one): ```bash - pip install dist\azure_ai_project-1.0.0b1-py3-none-any.whl --user --force-reinstall + pip install dist\azure_ai_projects-1.0.0b1-py3-none-any.whl --user --force-reinstall ``` ## Log in to Azure @@ -63,9 +63,9 @@ pytest To run tests in a particular folder (`tests\connections` for example): ```bash -python tests\connections +pytest tests\connections ``` ## Additional information -See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. \ No newline at end of file +See [test documentation](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/tests.md) for additional information, including how to set proxy recordings and run tests using recordings. 
From 42a4c8eb08e73bca5ad8f20f1d93446a565f7bd4 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Wed, 30 Oct 2024 07:56:57 -0800 Subject: [PATCH 061/138] update to get file content (#38192) --- .../ai/projects/aio/operations/_operations.py | 2 +- .../azure/ai/projects/models/_models.py | 193 +++++++++--------- .../azure/ai/projects/models/_patch.py | 45 ++-- .../ai/projects/operations/_operations.py | 2 +- .../azure/ai/projects/operations/_patch.py | 29 ++- .../agents/sample_agents_code_interpreter.py | 10 +- .../tests/connections/connection_test_base.py | 18 +- .../tests/connections/test_connections.py | 95 +++++---- .../connections/test_connections_async.py | 92 ++++++--- .../tests/inference/inference_test_base.py | 2 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 11 files changed, 279 insertions(+), 211 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index d988d121e51c..42fb879dce3b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -3517,7 +3517,7 @@ async def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: return deserialized # type: ignore @distributed_trace_async - async def get_file_content(self, file_id: str, **kwargs: Any) -> bytes: + async def _get_file_content(self, file_id: str, **kwargs: Any) -> bytes: """Retrieves the raw content of a specific file. :param file_id: The ID of the file to retrieve. Required. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 794cbb4b7342..2fc919dc040a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -699,102 +699,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class InternalConnectionProperties(_model_base.Model): - """Connection properties. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - InternalConnectionPropertiesAADAuth, InternalConnectionPropertiesApiKeyAuth, InternalConnectionPropertiesSASAuth - - - :ivar auth_type: Authentication type of the connection target. Required. Known values are: - "ApiKey", "AAD", and "SAS". - :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType - """ - - __mapping__: Dict[str, _model_base.Model] = {} - auth_type: str = rest_discriminator(name="authType") - """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" - - -class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discriminator="AAD"): - """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ - ). - - - :ivar auth_type: Authentication type of the connection target. Required. Entra ID - authentication - :vartype auth_type: str or ~azure.ai.projects.models.AAD - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.projects.models.ConnectionType - :ivar target: The connection URL to be used for this service. Required. 
- :vartype target: str - """ - - auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - -class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): - """Connection properties for connections with API key authentication. - - - :ivar auth_type: Authentication type of the connection target. Required. API Key authentication - :vartype auth_type: str or ~azure.ai.projects.models.API_KEY - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.projects.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth - :ivar target: The connection URL to be used for this service. Required. - :vartype target: str - """ - - auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() - """Credentials will only be present for authType=ApiKey. Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - -class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): - """Connection properties for connections with SAS authentication. - - - :ivar auth_type: Authentication type of the connection target. Required. Shared Access - Signature (SAS) authentication - :vartype auth_type: str or ~azure.ai.projects.models.SAS - :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". - :vartype category: str or ~azure.ai.projects.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth - :ivar target: The connection URL to be used for this service. Required. - :vartype target: str - """ - - auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. Shared Access Signature (SAS) - authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" - credentials: "_models._models.CredentialsSASAuth" = rest_field() - """Credentials will only be present for authType=ApiKey. Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" - - class ConnectionResource(_model_base.Model): """A connection resource. 
@@ -1535,6 +1439,103 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class InternalConnectionProperties(_model_base.Model): + """Connection properties. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + InternalConnectionPropertiesAADAuth, InternalConnectionPropertiesApiKeyAuth, + InternalConnectionPropertiesSASAuth + + + :ivar auth_type: Authentication type of the connection target. Required. Known values are: + "ApiKey", "AAD", and "SAS". + :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType + """ + + __mapping__: Dict[str, _model_base.Model] = {} + auth_type: str = rest_discriminator(name="authType") + """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", + and \"SAS\".""" + + +class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discriminator="AAD"): + """Connection properties for connections with AAD authentication (aka ``Entra ID passthrough``\\ + ). + + + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.projects.models.AAD + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Entra ID authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + target: str = rest_field() + """The connection URL to be used for this service. Required.""" + + +class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): + """Connection properties for connections with API key authentication. + + + :ivar auth_type: Authentication type of the connection target. Required. API Key authentication + :vartype auth_type: str or ~azure.ai.projects.models.API_KEY + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. API Key authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() + """Credentials will only be present for authType=ApiKey. Required.""" + target: str = rest_field() + """The connection URL to be used for this service. 
Required.""" + + +class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): + """Connection properties for connections with SAS authentication. + + + :ivar auth_type: Authentication type of the connection target. Required. Shared Access + Signature (SAS) authentication + :vartype auth_type: str or ~azure.ai.projects.models.SAS + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", and "AIServices". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str + """ + + auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore + """Authentication type of the connection target. Required. Shared Access Signature (SAS) + authentication""" + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", and \"AIServices\".""" + credentials: "_models._models.CredentialsSASAuth" = rest_field() + """Credentials will only be present for authType=ApiKey. Required.""" + target: str = rest_field() + """The connection URL to be used for this service. Required.""" + + class ListConnectionsResponse(_model_base.Model): """Response from the list operation. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 003d0024d0b0..3350db13b2bb 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -44,7 +44,21 @@ ) from abc import ABC, abstractmethod -from typing import AsyncIterator, Awaitable, Callable, List, Dict, Any, Type, Optional, Iterator, Tuple, Set, get_origin, Union +from typing import ( + AsyncIterator, + Awaitable, + Callable, + List, + Dict, + Any, + Type, + Optional, + Iterator, + Tuple, + Set, + get_origin, + Union, +) logger = logging.getLogger(__name__) @@ -389,6 +403,7 @@ class FileSearchTool(Tool): :param vector_store_ids: A list of vector store IDs to search for files. :type vector_store_ids: list[str] """ + def __init__(self, vector_store_ids: Optional[List[str]] = None): if vector_store_ids is None: self.vector_store_ids = set() @@ -440,6 +455,7 @@ class CodeInterpreterTool(Tool): :param file_ids: A list of file IDs to interpret. :type file_ids: list[str] """ + def __init__(self, file_ids: Optional[List[str]] = None): if file_ids is None: self.file_ids = set() @@ -452,7 +468,7 @@ def add_file(self, file_id: str) -> None: :param file_id: The ID of the file to interpret. :type file_id: str - """ + """ self.file_ids.add(file_id) def remove_file(self, file_id: str) -> None: @@ -1027,6 +1043,7 @@ class ThreadMessages: :return: A collection of messages. 
:rtype: ~azure.ai.projects.models.ThreadMessages """ + def __init__(self, pageable_list: OpenAIPageableListOfThreadMessage): self._messages = pageable_list.data @@ -1038,23 +1055,24 @@ def messages(self) -> List[ThreadMessage]: @property def text_messages(self) -> List[MessageTextContent]: """Returns all text message contents in the messages.""" - texts = [content for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent)] + texts = [ + content for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent) + ] return texts @property def image_contents(self) -> List[MessageImageFileContent]: """Returns all image file contents from image message contents in the messages.""" return [ - content for msg in self._messages - for content in msg.content - if isinstance(content, MessageImageFileContent) + content for msg in self._messages for content in msg.content if isinstance(content, MessageImageFileContent) ] @property def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: """Returns all file citation annotations from text message annotations in the messages.""" annotations = [ - annotation for msg in self._messages + annotation + for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent) for annotation in content.text.annotations @@ -1066,7 +1084,8 @@ def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: """Returns all file path annotations from text message annotations in the messages.""" annotations = [ - annotation for msg in self._messages + annotation + for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent) for annotation in content.text.annotations @@ -1076,30 +1095,30 @@ def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: def get_last_message_by_sender(self, sender: str) -> Optional[ThreadMessage]: """Returns the last message from the specified sender. - + :param sender: The role of the sender. :type sender: str :return: The last message from the specified sender. :rtype: ~azure.ai.projects.models.ThreadMessage """ - for msg in (self._messages): + for msg in self._messages: if msg.role == sender: return msg return None def get_last_text_message_by_sender(self, sender: str) -> Optional[MessageTextContent]: """Returns the last text message from the specified sender. - + :param sender: The role of the sender. :type sender: str :return: The last text message from the specified sender. 
:rtype: ~azure.ai.projects.models.MessageTextContent """ - for msg in (self._messages): + for msg in self._messages: if msg.role == sender: - for content in (msg.content): + for content in msg.content: if isinstance(content, MessageTextContent): return content return None diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index d85f3ac1f10c..a4b0f7e54adc 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -4913,7 +4913,7 @@ def get_file(self, file_id: str, **kwargs: Any) -> _models.OpenAIFile: return deserialized # type: ignore @distributed_trace - def get_file_content(self, file_id: str, **kwargs: Any) -> bytes: + def _get_file_content(self, file_id: str, **kwargs: Any) -> bytes: """Retrieves the raw content of a specific file. :param file_id: The ID of the file to retrieve. Required. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index f5946344739b..a66c82f2e698 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -132,9 +132,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) - ) + client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( @@ -2014,9 +2012,9 @@ def create_vector_store_file_batch_and_poll( ) return vector_store_file_batch - + @distributed_trace - def get_file_content_stream(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: + def get_file_content(self, file_id: str, **kwargs: Any) -> Iterator[bytes]: """ Returns file content as byte stream for given file_id. @@ -2026,8 +2024,8 @@ def get_file_content_stream(self, file_id: str, **kwargs: Any) -> Iterator[bytes :rtype: Iterator[bytes] :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. """ - kwargs['stream'] = True - response = super().get_file_content(file_id, **kwargs) + kwargs["stream"] = True + response = super()._get_file_content(file_id, **kwargs) return cast(Iterator[bytes], response) @distributed_trace @@ -2040,10 +2038,10 @@ def get_messages( order: Optional[Union[str, _models.ListSortOrder]] = None, after: Optional[str] = None, before: Optional[str] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.ThreadMessages: """Parses the OpenAIPageableListOfThreadMessage response and returns a ThreadMessages object. - + :param thread_id: Identifier of the thread. Required. :type thread_id: str :keyword run_id: Filter messages by the run ID that generated them. Default value is None. @@ -2068,16 +2066,13 @@ def get_messages( :return: ThreadMessages. 
The ThreadMessages is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadMessages """ - messages = super().list_messages(thread_id, run_id=run_id, limit=limit, order=order, after=after, before=before, **kwargs) + messages = super().list_messages( + thread_id, run_id=run_id, limit=limit, order=order, after=after, before=before, **kwargs + ) return _models.ThreadMessages(pageable_list=messages) @distributed_trace - def save_file( - self, - file_id: str, - file_name: str, - target_dir: Optional[Union[str, Path]] = None - ) -> None: + def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: """ Saves file content retrieved using a file identifier to the specified local directory. @@ -2109,7 +2104,7 @@ def save_file( # Get file content try: - file_content_stream = self.get_file_content_stream(file_id) + file_content_stream = self.get_file_content(file_id) if not file_content_stream: error_msg = f"No content retrievable for file ID '{file_id}'." logger.error(error_msg) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index 87bd6ad44094..d829251a188f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -40,7 +40,9 @@ with project_client: # upload a file and wait for it to be processed - file = project_client.agents.upload_file_and_poll(file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS) + file = project_client.agents.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS + ) print(f"Uploaded file, file ID: {file.id}") code_interpreter = CodeInterpreterTool(file_ids=[file.id]) @@ -60,7 +62,9 @@ # create a message message = project_client.agents.create_message( - thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?" 
+ thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", ) print(f"Created message, message ID: {message.id}") @@ -95,4 +99,4 @@ print(f"End Index: {file_path_annotation.end_index}") project_client.agents.delete_agent(agent.id) - print("Deleted agent") \ No newline at end of file + print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index 4d21e7bf59af..7dd4c40a7219 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -50,21 +50,21 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: def get_async_client(self, **kwargs) -> AIProjectClientAsync: conn_str = kwargs.pop("azure_ai_projects_connections_tests_project_connection_string") project_client = AIProjectClientAsync.from_connection_string( - credential= self.get_credential(AIProjectClient, is_async=True), + credential=self.get_credential(AIProjectClient, is_async=True), conn_str=conn_str, logging_enable=LOGGING_ENABLED, ) return project_client - + @classmethod def validate_connection( - cls, - connection: ConnectionProperties, - with_credentials: bool, - *, - expected_connection_type: ConnectionType = None, - expected_connection_name: str = None, - expected_authentication_type: AuthenticationType = None + cls, + connection: ConnectionProperties, + with_credentials: bool, + *, + expected_connection_type: ConnectionType = None, + expected_connection_name: str = None, + expected_authentication_type: AuthenticationType = None ): assert connection.id is not None diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 832283818120..9cc5a7d99121 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -20,72 +20,96 @@ def test_connections_get(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - connection = project_client.connections.get( - connection_name=aoai_connection, - with_credentials=False - ) + connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) - - connection = project_client.connections.get( - connection_name=aoai_connection, - with_credentials=True + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) - print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) - connection = project_client.connections.get( - connection_name=serverless_connection, - with_credentials=False - ) + connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=True) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) - - connection = project_client.connections.get( - 
connection_name=serverless_connection, - with_credentials=True + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) + + connection = project_client.connections.get(connection_name=serverless_connection, with_credentials=False) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) + connection = project_client.connections.get(connection_name=serverless_connection, with_credentials=True) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) @servicePreparerConnectionsTests() @recorded_by_proxy def test_connections_get_default(self, **kwargs): default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") - default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_default_serverless_connection_name") + default_serverless_connection = kwargs.pop( + "azure_ai_projects_connections_tests_default_serverless_connection_name" + ) with self.get_sync_client(**kwargs) as project_client: connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, - with_credentials=False + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False ) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, - with_credentials=True + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True ) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) connection = project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, - with_credentials=False + connection_type=ConnectionType.SERVERLESS, with_credentials=False ) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) connection = project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, - with_credentials=True + connection_type=ConnectionType.SERVERLESS, with_credentials=True ) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_serverless_connection, 
expected_connection_type=ConnectionType.SERVERLESS) - + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) @servicePreparerConnectionsTests() @recorded_by_proxy @@ -120,4 +144,3 @@ def test_connections_list(self, **kwargs): assert count_all > 2 assert count_all > count_aoai assert count_all > count_serverless - diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index b26f4a4a2a8a..9c21ee95c2d5 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -19,72 +19,100 @@ async def test_connections_get(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - connection = await project_client.connections.get( - connection_name=aoai_connection, - with_credentials=False - ) + connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) - - connection = await project_client.connections.get( - connection_name=aoai_connection, - with_credentials=True + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) + + connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=True) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) connection = await project_client.connections.get( - connection_name=serverless_connection, - with_credentials=False + connection_name=serverless_connection, with_credentials=False ) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) connection = await project_client.connections.get( - connection_name=serverless_connection, - with_credentials=True + connection_name=serverless_connection, with_credentials=True ) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) - + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) @servicePreparerConnectionsTests() @recorded_by_proxy_async async def test_connections_get_default(self, **kwargs): default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") - default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_default_serverless_connection_name") + default_serverless_connection = kwargs.pop( + 
"azure_ai_projects_connections_tests_default_serverless_connection_name" + ) async with self.get_async_client(**kwargs) as project_client: connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, - with_credentials=False + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False ) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, - with_credentials=True + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True ) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI) + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, - with_credentials=False + connection_type=ConnectionType.SERVERLESS, with_credentials=False ) print(connection) - ConnectionsTestBase.validate_connection(connection, False, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) + ConnectionsTestBase.validate_connection( + connection, + False, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, - with_credentials=True + connection_type=ConnectionType.SERVERLESS, with_credentials=True ) print(connection) - ConnectionsTestBase.validate_connection(connection, True, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.SERVERLESS) - + ConnectionsTestBase.validate_connection( + connection, + True, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.SERVERLESS, + ) @servicePreparerConnectionsTests() @recorded_by_proxy_async @@ -119,5 +147,3 @@ async def test_connections_list_async(self, **kwargs): assert count_all > 2 assert count_all > count_aoai assert count_all > count_serverless - - diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index f413107f2c72..9f644715e861 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -45,7 +45,7 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: def get_async_client(self, **kwargs) -> AIProjectClientAsync: conn_str = kwargs.pop("azure_ai_projects_inference_tests_project_connection_string") project_client = AIProjectClientAsync.from_connection_string( - credential= self.get_credential(AIProjectClientAsync, is_async=True), + credential=self.get_credential(AIProjectClientAsync, is_async=True), conn_str=conn_str, logging_enable=LOGGING_ENABLED, ) diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 
5a069d0df760..d74d92141c4c 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: d46982df7fb73959661c21c4ee47d56935ea8e22 +commit: 5ec4b1fea1a0d5fb578bb7705a6dd61f39dcfea3 repo: Azure/azure-rest-api-specs additionalDirectories: From 147e7fa2ebefb0055392d7b7e34b73b26615fb97 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:11:27 -0700 Subject: [PATCH 062/138] Enable tracing (#38185) --- .../ai/projects/aio/operations/_patch.py | 61 ++++---- .../azure/ai/projects/operations/_patch.py | 130 ++++++++++++++---- ...erence_client_and_azure_monitor_tracing.py | 49 +++++++ ...ai_inference_client_and_console_tracing.py | 53 +++++++ ...openai_client_and_azure_monitor_tracing.py | 56 ++++++++ ...azure_openai_client_and_console_tracing.py | 50 +++++++ .../diagnostics/diagnostics_test_base.py | 2 +- .../tests/diagnostics/test_diagnostics.py | 31 +++-- .../diagnostics/test_diagnostics_async.py | 30 ++-- 9 files changed, 388 insertions(+), 74 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 67a488e8b391..4bd27a15e8fd 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -8,10 +8,11 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ from ..._vendor import FileType -import io +import sys import logging import os import time +from io import TextIOWrapper from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload from azure.ai.projects import _types @@ -27,6 +28,7 @@ GetWorkspaceResponse, ) from ... import models as _models +from ...operations._patch import _enable_telemetry from azure.core.tracing.decorator_async import distributed_trace_async logger = logging.getLogger(__name__) @@ -183,7 +185,7 @@ async def get_azure_openai_client(self, *, api_version: str | None = None, **kwa from azure.identity import get_bearer_token_provider except ModuleNotFoundError as _: raise ModuleNotFoundError( - "azure.identity package not installed. Please install it using 'pip install azure.identity'" + "azure.identity package not installed. Please install it using 'pip install azure-identity'" ) client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider @@ -310,43 +312,50 @@ async def list( class DiagnosticsOperations(DiagnosticsOperationsGenerated): - connection_string: Optional[str] = None - """ Application Insights connection string. Call `enable()` to populate this property. 
""" + _connection_string: Optional[str] = None + _get_connection_string_called: bool = False def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") super().__init__(*args, **kwargs) - @distributed_trace_async - async def enable(self, **kwargs) -> bool: - """Enable Application Insights tracing. - This method makes service calls to get the properties of the Applications Insights resource - connected to the Azure AI Studio Project. If Application Insights was not enabled for this project, - this method will return False. Otherwise, it will return True. In this case the Application Insights - connection string can be accessed via the `.diagnostics.connection_string` property. - - :return: True if Application Insights tracing was enabled. False otherwise. - :rtype: bool + async def get_connection_string(self) -> str: """ + Get the Application Insights connection string associated with the Project's Application Insights resource. + On first call, this method makes a GET call to the Application Insights resource URL to get the connection string. + Subsequent calls return the cached connection string. - if not self.connection_string: - # Get the AI Studio Project properties + :return: The connection string, or `None` if an Application Insights resource was not enabled for the Project. + :rtype: str + """ + if not self._get_connection_string_called: + # Get the AI Studio Project properties, including Application Insights resource URL if exists get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace() - # No Application Insights resource was enabled for this Project - if not get_workspace_response.properties.application_insights: - return False + # Continue only if Application Insights resource was enabled for this Project + if get_workspace_response.properties.application_insights: - app_insights_respose: GetAppInsightsResponse = await self.get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) + # Make a GET call to the Application Insights resource URL to get the connection string + app_insights_respose: GetAppInsightsResponse = await self.get_app_insights( + app_insights_resource_url=get_workspace_response.properties.application_insights + ) - if not app_insights_respose.properties.connection_string: - raise ValueError("Application Insights resource does not have a connection string") + self._connection_string = app_insights_respose.properties.connection_string - self.connection_string = app_insights_respose.properties.connection_string + self._get_connection_string_called = True + return self._connection_string - return True + + # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? + # TODO: This could be a class method. But we don't have a class property AIProjectClient.diagnostics + def enable(self, *, destination: Union[TextIOWrapper, str] , **kwargs) -> None: + """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. + + :keyword destination: `sys.stdout` for tracing to console output, or a string holding the + endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required. 
+        :paramtype destination: Union[TextIOWrapper, str]
+        """
+        _enable_telemetry(destination=destination, **kwargs)


 class AgentsOperations(AgentsOperationsGenerated):
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
index a66c82f2e698..cb9d542a91d7 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
@@ -8,7 +8,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
 """
 import sys, io, logging, os, time
-from io import IOBase
+from io import IOBase, TextIOWrapper
 from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast
 from pathlib import Path

@@ -314,45 +314,125 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any)

         return connection_properties_list

+# Internal helper function to enable tracing, used by both sync and async clients
+def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None:
+    """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector.
+    :keyword destination: `sys.stdout` for tracing to console output, or a string holding the
+     endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required.
+    :paramtype destination: Union[TextIOWrapper, str]
+    """
+    if isinstance(destination, str):
+        # `destination` is the OTLP collector URL
+        # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage
+        try:
+            from opentelemetry import trace
+            from opentelemetry.sdk.trace import TracerProvider
+            from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+        except ModuleNotFoundError as _:
+            raise ModuleNotFoundError(
+                "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-sdk'"
+            )
+        try:
+            from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+        except ModuleNotFoundError as _:
+            raise ModuleNotFoundError(
+                "OpenTelemetry OTLP exporter is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-http'"
+            )
+        from azure.core.settings import settings
+        settings.tracing_implementation = "opentelemetry"
+        trace.set_tracer_provider(TracerProvider())
+        trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination)))
+
+    elif isinstance(destination, TextIOWrapper):
+        if destination is sys.stdout:
+            # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter
+            try:
+                from opentelemetry import trace
+                from opentelemetry.sdk.trace import TracerProvider
+                from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter
+            except ModuleNotFoundError as _:
+                raise ModuleNotFoundError(
+                    "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-sdk'"
+                )
+            from azure.core.settings import settings
+            settings.tracing_implementation = "opentelemetry"
+            trace.set_tracer_provider(TracerProvider())
+            trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
+        else:
+            raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIOWrapper`")
+    else:
+        raise ValueError("Destination must be a string or a `TextIOWrapper` object")
+
+    # Silently try to load a set of relevant Instrumentors
+    try:
+        from azure.ai.inference.tracing import AIInferenceInstrumentor
+        instrumentor = AIInferenceInstrumentor()
+        if not instrumentor.is_instrumented():
+            instrumentor.instrument()
+    except ModuleNotFoundError as _:
+        logger.warning("Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed")
+
+    try:
+        from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+        OpenAIInstrumentor().instrument()
+    except ModuleNotFoundError as _:
+        logger.warning("Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai` is not installed")
+
+    try:
+        from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+        LangchainInstrumentor().instrument()
+    except ModuleNotFoundError as _:
+        logger.warning("Could not call `LangchainInstrumentor().instrument()` since `opentelemetry-instrumentation-langchain` is not installed")
+
+
+# TODO: change this to TelemetryOperations
 class DiagnosticsOperations(DiagnosticsOperationsGenerated):

-    connection_string: Optional[str] = None
-    """ Application Insights connection string. Call `enable()` to populate this property. """
+    _connection_string: Optional[str] = None
+    _get_connection_string_called: bool = False

     def __init__(self, *args, **kwargs):
         self._outer_instance = kwargs.pop("outer_instance")
         super().__init__(*args, **kwargs)

-    @distributed_trace
-    def enable(self, **kwargs) -> bool:
-        """Enable Application Insights tracing.
-        This method makes service calls to get the properties of the Applications Insights resource
-        connected to the Azure AI Studio Project. If Application Insights was not enabled for this project,
-        this method will return False. Otherwise, it will return True. In this case the Application Insights
-        connection string can be accessed via the `.diagnostics.connection_string` property.
-
-        :return: True if Application Insights tracing was enabled. False otherwise.
-        :rtype: bool
+    def get_connection_string(self) -> str:
+        """
+        Get the Application Insights connection string associated with the Project's Application Insights resource.
+        On first call, this method makes a GET call to the Application Insights resource URL to get the connection string.
+        Subsequent calls return the cached connection string.
+
+        :return: The connection string, or `None` if an Application Insights resource was not enabled for the Project.
+        :rtype: str
         """
-        if not self.connection_string:
-            # Get the AI Studio Project properties
+        if not self._get_connection_string_called:
+            # Get the AI Studio Project properties, including Application Insights resource URL if exists
             get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace()
-            # No Application Insights resource was enabled for this Project
-            if not get_workspace_response.properties.application_insights:
-                return False
+            # Continue only if Application Insights resource was enabled for this Project
+            if get_workspace_response.properties.application_insights:
-            app_insights_respose: GetAppInsightsResponse = self.get_app_insights(
-                app_insights_resource_url=get_workspace_response.properties.application_insights
-            )
+                # Make a GET call to the Application Insights resource URL to get the connection string
+                app_insights_response: GetAppInsightsResponse = self.get_app_insights(
+                    app_insights_resource_url=get_workspace_response.properties.application_insights
+                )
+
+                self._connection_string = app_insights_response.properties.connection_string
-            if not app_insights_respose.properties.connection_string:
-                raise ValueError("Application Insights resource does not have a connection string")
+            self._get_connection_string_called = True
+
+        return self._connection_string
-            self.connection_string = app_insights_respose.properties.connection_string
-        return True

+    # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`?
+    # TODO: This could be a class method. But we don't have a class property AIProjectClient.diagnostics
+    def enable(self, *, destination: Union[TextIOWrapper, str], **kwargs) -> None:
+        """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector.
+
+        :keyword destination: `sys.stdout` for tracing to console output, or a string holding the
+         endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required.
+        :paramtype destination: Union[TextIOWrapper, str]
+        """
+        _enable_telemetry(destination=destination, **kwargs)


 class AgentsOperations(AgentsOperationsGenerated):
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py
new file mode 100644
index 000000000000..99f62c9bc457
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py
@@ -0,0 +1,49 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    ChatCompletionsClient from the azure.ai.inference package. The client
+    is already instrumented to upload traces to Azure Monitor. View the results
+    in the "Tracing" tab in your Azure AI Studio project page.
+
+USAGE:
+    python sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-ai-inference azure-identity azure.monitor.opentelemetry
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional.
For detailed traces, including chat request and response messages. +""" +import os +from azure.ai.projects import AIProjectClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential +from azure.monitor.opentelemetry import configure_azure_monitor + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Enable Azure Monitor tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` + # for detailed logs, including chat request and response messages. + application_insights_connection_string = project_client.diagnostics.get_connection_string() + if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Studio project page.") + exit() + configure_azure_monitor(connection_string=application_insights_connection_string) + + # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: + with project_client.inference.get_chat_completions_client() as client: + + response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py new file mode 100644 index 000000000000..0c9a405038f3 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -0,0 +1,53 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + ChatCompletionsClient from the azure.ai.inference package. The client + is already instrumented with console OpenTelemetry tracing. + +USAGE: + python sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py + + Before running the sample: + + pip install azure-ai-projects azure-ai-inference azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. +""" +import os +import sys +from azure.ai.projects import AIProjectClient +from azure.ai.inference.models import UserMessage +from azure.identity import DefaultAzureCredential + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Enable console tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` + # for detailed logs, including chat request and response messages. 
+    project_client.diagnostics.enable(destination=sys.stdout)
+
+    # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection:
+    with project_client.inference.get_chat_completions_client() as client:
+
+        response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
+
+        print(response.choices[0].message.content)
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py
new file mode 100644
index 000000000000..4c421acac2e9
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py
@@ -0,0 +1,56 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+DESCRIPTION:
+    Given an AIProjectClient, this sample demonstrates how to get an authenticated
+    AzureOpenAI client from the openai package. The client is already instrumented
+    to upload traces to Azure Monitor. View the results in the "Tracing" tab in your
+    Azure AI Studio project page.
+
+USAGE:
+    python sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects openai azure.monitor.opentelemetry opentelemetry-instrumentation-openai
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages.
+"""
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+with AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+) as project_client:
+
+    # Enable Azure Monitor tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`
+    # for detailed logs, including chat request and response messages.
+ application_insights_connection_string = project_client.diagnostics.get_connection_string() + if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Studio project page.") + exit() + configure_azure_monitor(connection_string=application_insights_connection_string) + + # Get an authenticated OpenAI client for your default Azure OpenAI connection: + with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: + + response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py new file mode 100644 index 000000000000..df703286a343 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py @@ -0,0 +1,50 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +DESCRIPTION: + Given an AIProjectClient, this sample demonstrates how to get an authenticated + AzureOpenAI client from the openai package. The client is already instrumented + with console OpenTelemetry tracing. + +USAGE: + python sample_chat_completions_with_azure_openai_client_and_console_tracing.py + + Before running the sample: + + pip install azure-ai-projects openai opentelemetry.instrumentation.openai opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. +""" +import os +import sys +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential + +with AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) as project_client: + + # Enable console tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` + # for detailed logs, including chat request and response messages. 
+ project_client.diagnostics.enable(destination=sys.stdout) + + # Get an authenticated OpenAI client for your default Azure OpenAI connection: + with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: + + response = client.chat.completions.create( + model="gpt-4-0613", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + + print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py index c6557d8d49bf..a2bb72fb0254 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py @@ -21,7 +21,7 @@ ) # Set to True to enable SDK logging -LOGGING_ENABLED = True +LOGGING_ENABLED = False if LOGGING_ENABLED: # Create a logger for the 'azure' SDK diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py index 9a9b796c452f..c14a0b5ead80 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py @@ -2,8 +2,9 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ - +import sys from devtools_testutils import recorded_by_proxy +from azure.ai.projects import AIProjectClient from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests @@ -12,14 +13,22 @@ class TestDiagnostics(DiagnosticsTestBase): @servicePreparerDiagnosticsTests() @recorded_by_proxy - def test_diagnostics(self, **kwargs): + def test_diagnostics_get_connection_string(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + connection_string = project_client.diagnostics.get_connection_string() + print(connection_string) + assert connection_string + assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + assert connection_string == project_client.diagnostics.get_connection_string() + + @servicePreparerDiagnosticsTests() + def test_diagnostics_enable_console_tracing(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + project_client.diagnostics.enable(destination=sys.stdout) + #TODO: Create inference client and do chat completions. How do I know if traces were emitted? + + @servicePreparerDiagnosticsTests() + def test_diagnostics_enable_otlp_tracing(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - assert project_client.diagnostics.connection_string == None - assert project_client.diagnostics.enable() == True - assert project_client.diagnostics.connection_string is not None - assert bool( - DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match( - project_client.diagnostics.connection_string - ) - ) - assert project_client.diagnostics.enable() == True + project_client.diagnostics.enable(destination="https://some.otlp.collector.endpoint") + #TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. 
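The TODOs in the diagnostics tests above ask how a test could tell whether traces were actually emitted. One possible approach, sketched below and not part of this patch, is to register an in-memory span exporter from the opentelemetry-sdk package in place of the console exporter and assert on the captured spans. The final assertion is illustrative only, since the exact span names and attributes depend on which client was instrumented.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Collect spans in memory instead of printing them, so a test can inspect them.
exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)  # note: the global tracer provider can only be set once per process

# ... run an instrumented chat completions call against the project here ...

spans = exporter.get_finished_spans()  # tuple of ReadableSpan objects recorded so far
assert len(spans) > 0  # at least one span was emitted by the instrumented call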
diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py index 29d1b0bd5075..aca665503138 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py +++ b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ - +import sys from devtools_testutils.aio import recorded_by_proxy_async from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests @@ -12,14 +12,22 @@ class TestDiagnosticsAsync(DiagnosticsTestBase): @servicePreparerDiagnosticsTests() @recorded_by_proxy_async - async def test_diagnostics_async(self, **kwargs): + async def test_diagnostics_get_connection_string_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + connection_string = await project_client.diagnostics.get_connection_string() + print(connection_string) + assert connection_string + assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + assert connection_string == await project_client.diagnostics.get_connection_string() + + @servicePreparerDiagnosticsTests() + async def test_diagnostics_enable_console_tracing_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + project_client.diagnostics.enable(destination=sys.stdout) + #TODO: Create inference client and do chat completions. How do I know if traces were emitted? + + @servicePreparerDiagnosticsTests() + async def test_diagnostics_enable_otlp_tracing(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - assert project_client.diagnostics.connection_string == None - assert await project_client.diagnostics.enable() == True - assert project_client.diagnostics.connection_string is not None - assert bool( - DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match( - project_client.diagnostics.connection_string - ) - ) - assert await project_client.diagnostics.enable() == True + project_client.diagnostics.enable(destination="https://some.otlp.collector.endpoint") + #TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. \ No newline at end of file From 3ed16a7d3cb8c5b53cdf6e9cf052b1c244c0608c Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Wed, 30 Oct 2024 11:07:53 -0700 Subject: [PATCH 063/138] added validation to tools and tools_resources, added update agent (#38195) --- .../ai/projects/aio/operations/_patch.py | 277 +++++++++++++++++ .../azure/ai/projects/operations/_patch.py | 281 +++++++++++++++++- .../tests/agents/test_agents_client.py | 79 ++++- 3 files changed, 633 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 4bd27a15e8fd..0d31726c333f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -574,6 +574,283 @@ async def create_agent( metadata=metadata, **kwargs, ) + + @overload + async def update_agent( + self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. 
+ :type assistant_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. 
Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + + @overload + async def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. 
+ + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. 
+        :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+         or ~azure.ai.projects.models.AgentsApiResponseFormat
+        :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+         for storing additional information about that object in a structured format. Keys may be up to
+         64 characters in length and values may be up to 512 characters in length. Default value is
+         None.
+        :paramtype metadata: dict[str, str]
+        :return: Agent. The Agent is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.Agent
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        self._validate_tools_and_tool_resources(tools, tool_resources)
+
+        if body is not _Unset:
+            if isinstance(body, io.IOBase):
+                return await super().update_agent(body=body, content_type=content_type, **kwargs)
+            return await super().update_agent(body=body, **kwargs)
+
+        if toolset is not None:
+            self._toolset = toolset
+            tools = toolset.definitions
+            tool_resources = toolset.resources
+
+        return await super().update_agent(
+            assistant_id=assistant_id,
+            model=model,
+            name=name,
+            description=description,
+            instructions=instructions,
+            tools=tools,
+            tool_resources=tool_resources,
+            temperature=temperature,
+            top_p=top_p,
+            response_format=response_format,
+            metadata=metadata,
+            **kwargs,
+        )
+
+    def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]):
+        if tool_resources is None:
+            return
+        if tools is None:
+            tools = []
+
+        if tool_resources.file_search is not None and not any(isinstance(tool, _models.FileSearchToolDefinition) for tool in tools):
+            raise ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided")
+        if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools):
+            raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided")
+

     def get_toolset(self) -> Optional[_models.AsyncToolSet]:
         """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
index cb9d542a91d7..b2c8d6618411 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py
@@ -626,8 +626,11 @@ def create_agent(
         :return: An Agent object.
         :raises: HttpResponseError for HTTP errors.
         """
+
+        self._validate_tools_and_tool_resources(tools, tool_resources)
+
         if body is not _Unset:
-            if isinstance(body, IOBase):
+            if isinstance(body, io.IOBase):
                 return super().create_agent(body=body, content_type=content_type, **kwargs)
             return super().create_agent(body=body, **kwargs)

@@ -649,7 +652,283 @@ def create_agent(
             metadata=metadata,
             **kwargs,
         )
+
+    @overload
+    def update_agent(
+        self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any
+    ) -> _models.Agent:
+        """Modifies an existing agent.
+
+        :param assistant_id: The ID of the agent to modify. Required.
+        :type assistant_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+         Default value is "application/json".
+        :paramtype content_type: str
+        :return: Agent.
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. 
The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def update_agent( + self, + assistant_id: str, + *, + content_type: str = "application/json", + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. + :paramtype response_format: str or str or ~azure.ai.projects.models.AgentsApiResponseFormatMode + or ~azure.ai.projects.models.AgentsApiResponseFormat + :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used + for storing additional information about that object in a structured format. Keys may be up to + 64 characters in length and values may be up to 512 characters in length. Default value is + None. + :paramtype metadata: dict[str, str] + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + + @overload + def update_agent( + self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Required. + :type body: IO[bytes] + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: Agent. The Agent is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.Agent + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def update_agent( + self, + assistant_id: str, + body: Union[JSON, IO[bytes]] = _Unset, + *, + model: Optional[str] = None, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[_models.ToolDefinition]] = None, + tool_resources: Optional[_models.ToolResources] = None, + toolset: Optional[_models.ToolSet] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + content_type: str = "application/json", + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> _models.Agent: + """Modifies an existing agent. + :param assistant_id: The ID of the agent to modify. Required. + :type assistant_id: str + :param body: Is either a JSON type or a IO[bytes] type. Required. + :type body: JSON or IO[bytes] + :keyword model: The ID of the model to use. Default value is None. + :paramtype model: str + :keyword name: The modified name for the agent to use. Default value is None. + :paramtype name: str + :keyword description: The modified description for the agent to use. Default value is None. + :paramtype description: str + :keyword instructions: The modified system instructions for the new agent to use. Default value + is None. + :paramtype instructions: str + :keyword tools: The modified collection of tools to enable for the agent. Default value is + None. + :paramtype tools: list[~azure.ai.projects.models.ToolDefinition] + :keyword tool_resources: A set of resources that are used by the agent's tools. The resources + are specific to the type of tool. For example, + the ``code_interpreter`` tool requires a list of file IDs, while the ``file_search`` tool + requires a list of vector store IDs. Default value is None. + :paramtype tool_resources: ~azure.ai.projects.models.ToolResources + :keyword toolset: The Collection of tools and resources (alternative to `tools` and `tool_resources` + and adds automatic execution logic for functions). Default value is None. + :paramtype toolset: ~azure.ai.projects.models.ToolSet + :keyword temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 + will make the output more random, + while lower values like 0.2 will make it more focused and deterministic. Default value is + None. + :paramtype temperature: float + :keyword top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. + So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. Default value is None. + :paramtype top_p: float + :keyword response_format: The response format of the tool calls used by this agent. Is one of + the following types: str, Union[str, "_models.AgentsApiResponseFormatMode"], + AgentsApiResponseFormat Default value is None. 
+ :paramtype response_format: str or ~azure.ai.projects.models.AgentsApiResponseFormatMode
+ or ~azure.ai.projects.models.AgentsApiResponseFormat
+ :keyword metadata: A set of up to 16 key/value pairs that can be attached to an object, used
+ for storing additional information about that object in a structured format. Keys may be up to
+ 64 characters in length and values may be up to 512 characters in length. Default value is
+ None.
+ :paramtype metadata: dict[str, str]
+ :return: Agent. The Agent is compatible with MutableMapping
+ :rtype: ~azure.ai.projects.models.Agent
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ self._validate_tools_and_tool_resources(tools, tool_resources)
+
+ if body is not _Unset:
+ if isinstance(body, io.IOBase):
+ return super().update_agent(body=body, content_type=content_type, **kwargs)
+ return super().update_agent(body=body, **kwargs)
+
+ if toolset is not None:
+ self._toolset = toolset
+ tools = toolset.definitions
+ tool_resources = toolset.resources
+
+ return super().update_agent(
+ assistant_id=assistant_id,
+ model=model,
+ name=name,
+ description=description,
+ instructions=instructions,
+ tools=tools,
+ tool_resources=tool_resources,
+ temperature=temperature,
+ top_p=top_p,
+ response_format=response_format,
+ metadata=metadata,
+ **kwargs,
+ )
+
+ def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]):
+ if tool_resources is None:
+ return
+ if tools is None:
+ tools = []
+
+ if tool_resources.file_search is not None and not any(isinstance(tool, _models.FileSearchToolDefinition) for tool in tools):
+ raise ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided")
+ if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools):
+ raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided")
+
 def get_toolset(self) -> Optional[_models.ToolSet]:
 """
 Get the toolset for the agent.
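A minimal usage sketch of the `update_agent` surface added above (an illustration, not part of the patch; the connection-string environment variable, model name, and user function mirror the tests later in this series and are assumptions here):

    import os
    from azure.ai.projects import AIProjectClient
    from azure.ai.projects.models import FunctionTool, ToolSet
    from azure.identity import DefaultAzureCredential

    def fetch_current_datetime():
        # Illustrative user function the agent may call.
        from datetime import datetime
        return datetime.now().isoformat()

    with AIProjectClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
    ) as project_client:
        agent = project_client.agents.create_agent(
            model="gpt-4o", name="my-agent", instructions="You are helpful agent"
        )

        # Passing a toolset lets update_agent derive both `tools` and
        # `tool_resources` and enables automatic function execution.
        toolset = ToolSet()
        toolset.add(FunctionTool(functions={"fetch_current_datetime": fetch_current_datetime}))
        agent = project_client.agents.update_agent(agent.id, name="my-agent2", toolset=toolset)
        assert agent.name == "my-agent2"

        project_client.agents.delete_agent(agent.id)

Note that both `create_agent` and `update_agent` now raise a `ValueError` when a file-search or code-interpreter entry in `tool_resources` arrives without a matching tool definition, per `_validate_tools_and_tool_resources` above.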
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
index f262cfe86cdb..e30a5d552d03 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
@@ -14,6 +14,7 @@
 from azure.ai.projects import AIProjectClient
 from azure.ai.projects.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet
+from azure.ai.projects.models import CodeInterpreterToolResource, FileSearchToolResource, ToolResources
 from azure.core.pipeline.transport import RequestsTransport
 from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
 from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
@@ -89,7 +90,6 @@ def fetch_current_datetime_recordings():
 # Statically defined user functions for fast reference
 user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings}
-user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live}
 
 # The test class name needs to start with "Test" to get collected by pytest
@@ -155,7 +155,7 @@ def test_create_client(self, **kwargs):
 # test agent creation and deletion
 @agentClientPreparer()
 @recorded_by_proxy
- def test_create_delete_agent(self, **kwargs):
+ def test_create_update_delete_agent(self, **kwargs):
 # create client
 client = self.create_client(**kwargs)
 assert isinstance(client, AIProjectClient)
@@ -166,6 +166,10 @@ def test_create_delete_agent(self, **kwargs):
 assert agent.id
 print("Created agent, agent ID", agent.id)
 
+ # update agent
+ agent = client.agents.update_agent(agent.id, name="my-agent2", instructions="You are helpful agent")
+ assert agent.name == "my-agent2"
+
 # delete agent and close client
 client.agents.delete_agent(agent.id)
 print("Deleted agent")
@@ -189,13 +193,39 @@ def test_create_agent_with_tools(self, **kwargs):
 assert agent.id
 print("Created agent, agent ID", agent.id)
 assert agent.tools
- assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+ assert agent.tools[0] == functions.definitions[0]
 print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
 
 # delete agent and close client
 client.agents.delete_agent(agent.id)
 print("Deleted agent")
 client.close()
+
+ # test agent creation with tools and resources
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def test_create_agent_with_tools_and_resources(self, **kwargs):
+ # create client
+ client = self.create_client(**kwargs)
+ assert isinstance(client, AIProjectClient)
+
+ # initialize agent functions
+ functions = FunctionTool(functions=user_functions_recording)
+
+ # create agent with tools
+ agent = client.agents.create_agent(
+ model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions
+ )
+ assert agent.id
+ print("Created agent, agent ID", agent.id)
+ assert agent.tools
+ assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+ print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
+
+ # delete agent and close client
+ client.agents.delete_agent(agent.id)
+ print("Deleted agent")
+ client.close()
 
 @agentClientPreparer()
 @recorded_by_proxy
@@ -1074,6 +1104,49 @@ def test_get_run_step(self, **kwargs):
 client.agents.delete_agent(agent.id)
 print("Deleted agent")
 client.close()
+
+
+ # test agent creation with invalid tool resource
+ @agentClientPreparer()
+ @recorded_by_proxy
+ def
test_create_agent_with_invalid_code_interpreter_tool_resource(self, **kwargs): + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.code_interpreter = CodeInterpreterToolResource() + + exception_message = "" + try: + client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=[], tool_resources=tool_resources + ) + except ValueError as e: + exception_message = e.args[0] + + assert exception_message == "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + + # test agent creation with invalid tool resource + @agentClientPreparer() + @recorded_by_proxy + def test_create_agent_with_invalid_file_search_tool_resource(self, **kwargs): + # create client + with self.create_client(**kwargs) as client: + + # initialize resources + tool_resources = ToolResources() + tool_resources.file_search = FileSearchToolResource() + + exception_message = "" + try: + client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=[], tool_resources=tool_resources + ) + except ValueError as e: + exception_message = e.args[0] + + assert exception_message == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" # # ********************************************************************************** # # From a395f29b34939003450c4df1ee75bbb1a50f9613 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:54:52 -0700 Subject: [PATCH 064/138] Rename `.diagnostics` to `.telemetry` (#38209) --- .../azure/ai/projects/_client.py | 8 ++-- .../azure/ai/projects/_patch.py | 4 +- .../azure/ai/projects/aio/_client.py | 8 ++-- .../azure/ai/projects/aio/_patch.py | 4 +- .../ai/projects/aio/operations/__init__.py | 4 +- .../ai/projects/aio/operations/_operations.py | 20 ++++++---- .../ai/projects/aio/operations/_patch.py | 14 +++---- .../azure/ai/projects/models/__init__.py | 4 -- .../azure/ai/projects/models/_models.py | 40 +------------------ .../azure/ai/projects/models/_patch.py | 1 + .../azure/ai/projects/operations/__init__.py | 4 +- .../ai/projects/operations/_operations.py | 22 +++++----- .../azure/ai/projects/operations/_patch.py | 35 ++++++++++------ .../sample_agents_basics_async.py | 2 +- .../sample_agents_functions_async.py | 2 +- ...sample_agents_stream_eventhandler_async.py | 2 +- ..._stream_eventhandler_with_toolset_async.py | 2 +- .../sample_agents_stream_iteration_async.py | 2 +- ...ts_vector_store_batch_file_search_async.py | 2 +- ...gents_with_file_search_attachment_async.py | 2 +- .../samples/agents/sample_agents_basics.py | 2 +- .../agents/sample_agents_code_interpreter.py | 2 +- .../agents/sample_agents_file_search.py | 2 +- .../samples/agents/sample_agents_functions.py | 2 +- .../agents/sample_agents_run_with_toolset.py | 2 +- .../sample_agents_stream_eventhandler.py | 2 +- ...ents_stream_eventhandler_with_functions.py | 2 +- ...agents_stream_eventhandler_with_toolset.py | 2 +- .../agents/sample_agents_stream_iteration.py | 2 +- ...le_agents_stream_iteration_with_toolset.py | 2 +- ...e_agents_vector_store_batch_file_search.py | 2 +- ...mple_agents_with_file_search_attachment.py | 2 +- .../async_samples/sample_connections_async.py | 2 +- .../samples/connections/sample_connections.py | 2 +- .../async_samples/sample_evaluations_async.py | 2 +- ...ns_with_azure_ai_inference_client_async.py | 2 
+- ...pletions_with_azure_openai_client_async.py | 9 ++++- ...gs_with_azure_ai_inference_client_async.py | 2 +- ...pletions_with_azure_ai_inference_client.py | 2 +- ...erence_client_and_azure_monitor_tracing.py | 8 ++-- ...ai_inference_client_and_console_tracing.py | 18 ++------- ...at_completions_with_azure_openai_client.py | 9 ++++- ...openai_client_and_azure_monitor_tracing.py | 15 ++++--- ...azure_openai_client_and_console_tracing.py | 17 +++++--- ...beddings_with_azure_ai_inference_client.py | 2 +- .../tests/agents/test_agents_client.py | 1 + .../tests/diagnostics/test_diagnostics.py | 34 ---------------- .../diagnostics/test_diagnostics_async.py | 33 --------------- .../telemetry_test_base.py} | 12 +++--- .../tests/telemetry/test_telemetry.py | 34 ++++++++++++++++ .../tests/telemetry/test_telemetry_async.py | 33 +++++++++++++++ sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 52 files changed, 217 insertions(+), 228 deletions(-) delete mode 100644 sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py delete mode 100644 sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py rename sdk/ai/azure-ai-projects/tests/{diagnostics/diagnostics_test_base.py => telemetry/telemetry_test_base.py} (80%) create mode 100644 sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py create mode 100644 sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py index ab9ed29d4c3e..92186f3a4e53 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_client.py @@ -16,7 +16,7 @@ from ._configuration import AIProjectClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, DiagnosticsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations if TYPE_CHECKING: from azure.core.credentials import TokenCredential @@ -29,8 +29,8 @@ class AIProjectClient: :vartype agents: azure.ai.projects.operations.AgentsOperations :ivar connections: ConnectionsOperations operations :vartype connections: azure.ai.projects.operations.ConnectionsOperations - :ivar diagnostics: DiagnosticsOperations operations - :vartype diagnostics: azure.ai.projects.operations.DiagnosticsOperations + :ivar telemetry: TelemetryOperations operations + :vartype telemetry: azure.ai.projects.operations.TelemetryOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.projects.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -95,7 +95,7 @@ def __init__( self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) + self.telemetry = TelemetryOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py 
b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 9be1af9b2828..37be4bcc06d4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -16,7 +16,7 @@ from azure.core.pipeline import policies from ._configuration import AIProjectClientConfiguration from ._serialization import Deserializer, Serializer -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, DiagnosticsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations @@ -185,7 +185,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.diagnostics = DiagnosticsOperations( + self.telemetry = TelemetryOperations( self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self ) self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py index 2e9f614d420e..34bac8d0ec64 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_client.py @@ -16,7 +16,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import AIProjectClientConfiguration -from .operations import AgentsOperations, ConnectionsOperations, DiagnosticsOperations, EvaluationsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential @@ -29,8 +29,8 @@ class AIProjectClient: :vartype agents: azure.ai.projects.aio.operations.AgentsOperations :ivar connections: ConnectionsOperations operations :vartype connections: azure.ai.projects.aio.operations.ConnectionsOperations - :ivar diagnostics: DiagnosticsOperations operations - :vartype diagnostics: azure.ai.projects.aio.operations.DiagnosticsOperations + :ivar telemetry: TelemetryOperations operations + :vartype telemetry: azure.ai.projects.aio.operations.TelemetryOperations :ivar evaluations: EvaluationsOperations operations :vartype evaluations: azure.ai.projects.aio.operations.EvaluationsOperations :param endpoint: The Azure AI Studio project endpoint, in the form @@ -95,7 +95,7 @@ def __init__( self._serialize.client_side_validation = False self.agents = AgentsOperations(self._client, self._config, self._serialize, self._deserialize) self.connections = ConnectionsOperations(self._client, self._config, self._serialize, self._deserialize) - self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize) + self.telemetry = TelemetryOperations(self._client, self._config, self._serialize, self._deserialize) self.evaluations = EvaluationsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 22ecfe1c8c07..5905071bcf97 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -16,7 +16,7 @@ from .._serialization import Deserializer, Serializer from ._configuration import 
AIProjectClientConfiguration -from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, DiagnosticsOperations +from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations @@ -184,7 +184,7 @@ def __init__( self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.diagnostics = DiagnosticsOperations( + self.telemetry = TelemetryOperations( self._client0, self._config0, self._serialize, self._deserialize, outer_instance=self ) self.connections = ConnectionsOperations(self._client1, self._config1, self._serialize, self._deserialize) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py index d6c4708ca4c0..64c4031e2bb6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/__init__.py @@ -14,7 +14,7 @@ from ._operations import AgentsOperations # type: ignore from ._operations import ConnectionsOperations # type: ignore -from ._operations import DiagnosticsOperations # type: ignore +from ._operations import TelemetryOperations # type: ignore from ._operations import EvaluationsOperations # type: ignore from ._patch import __all__ as _patch_all @@ -24,7 +24,7 @@ __all__ = [ "AgentsOperations", "ConnectionsOperations", - "DiagnosticsOperations", + "TelemetryOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 42fb879dce3b..cc99f2615f83 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -77,7 +77,6 @@ build_connections_get_connection_with_secrets_request, build_connections_get_workspace_request, build_connections_list_connections_request, - build_diagnostics_get_app_insights_request, build_evaluations_create_or_replace_schedule_request, build_evaluations_create_request, build_evaluations_disable_schedule_request, @@ -86,6 +85,7 @@ build_evaluations_list_request, build_evaluations_list_schedule_request, build_evaluations_update_request, + build_telemetry_get_app_insights_request, ) if sys.version_info >= (3, 9): @@ -5281,14 +5281,14 @@ async def _get_connection_with_secrets( return deserialized # type: ignore -class DiagnosticsOperations: +class TelemetryOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.aio.AIProjectClient`'s - :attr:`diagnostics` attribute. + :attr:`telemetry` attribute. 
""" def __init__(self, *args, **kwargs) -> None: @@ -5299,7 +5299,9 @@ def __init__(self, *args, **kwargs) -> None: self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async - async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _models.GetAppInsightsResponse: + async def _get_app_insights( + self, app_insights_resource_url: str, **kwargs: Any + ) -> _models._models.GetAppInsightsResponse: # pylint: disable=line-too-long """Gets the properties of the specified Application Insights resource. @@ -5309,7 +5311,7 @@ async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) Required. :type app_insights_resource_url: str :return: GetAppInsightsResponse. The GetAppInsightsResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.GetAppInsightsResponse + :rtype: ~azure.ai.projects.models._models.GetAppInsightsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -5323,9 +5325,9 @@ async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetAppInsightsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetAppInsightsResponse] = kwargs.pop("cls", None) - _request = build_diagnostics_get_app_insights_request( + _request = build_telemetry_get_app_insights_request( app_insights_resource_url=app_insights_resource_url, api_version=self._config.api_version, headers=_headers, @@ -5360,7 +5362,9 @@ async def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetAppInsightsResponse, response.json()) + deserialized = _deserialize( + _models._models.GetAppInsightsResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 0d31726c333f..a254c77a09f6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -18,7 +19,7 @@ from azure.ai.projects import _types from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import DiagnosticsOperations as DiagnosticsOperationsGenerated +from ._operations import TelemetryOperations as TelemetryOperationsGenerated from ...models._patch import ConnectionProperties from ...models._enums import AuthenticationType, ConnectionType, FilePurpose from ...models._models import ( @@ -310,7 +311,7 @@ async def list( return connection_properties_list -class DiagnosticsOperations(DiagnosticsOperationsGenerated): +class TelemetryOperations(TelemetryOperationsGenerated): _connection_string: Optional[str] = None _get_connection_string_called: bool = False @@ -336,7 +337,7 @@ async def get_connection_string(self) -> str: if get_workspace_response.properties.application_insights: # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = await self.get_app_insights( + app_insights_respose: GetAppInsightsResponse = await self._get_app_insights( app_insights_resource_url=get_workspace_response.properties.application_insights ) @@ -345,10 +346,9 @@ async def get_connection_string(self) -> str: self._get_connection_string_called = True return self._connection_string - # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? - # TODO: This could be a class method. But we don't have a class property AIProjectClient.diagnostics - def enable(self, *, destination: Union[TextIOWrapper, str] , **kwargs) -> None: + # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry + def enable(self, *, destination: Union[TextIOWrapper, str], **kwargs) -> None: """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. 
:keyword destination: `sys.stdout` for tracing to console output, or a string holding the @@ -2299,7 +2299,7 @@ async def create_vector_store_file_batch_and_poll( __all__: List[str] = [ "AgentsOperations", "ConnectionsOperations", - "DiagnosticsOperations", + "TelemetryOperations", "InferenceOperations", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index cf380fc258cf..0faab636008c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -20,7 +20,6 @@ AgentThreadCreationOptions, AgentsApiResponseFormat, AgentsNamedToolChoice, - AppInsightsProperties, ApplicationInsightsConfiguration, AzureAISearchResource, AzureAISearchToolDefinition, @@ -42,7 +41,6 @@ FunctionDefinition, FunctionName, FunctionToolDefinition, - GetAppInsightsResponse, IndexResource, InputData, MessageAttachment, @@ -203,7 +201,6 @@ "AgentThreadCreationOptions", "AgentsApiResponseFormat", "AgentsNamedToolChoice", - "AppInsightsProperties", "ApplicationInsightsConfiguration", "AzureAISearchResource", "AzureAISearchToolDefinition", @@ -225,7 +222,6 @@ "FunctionDefinition", "FunctionName", "FunctionToolDefinition", - "GetAppInsightsResponse", "IndexResource", "InputData", "MessageAttachment", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 2fc919dc040a..0f37a301d0fe 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -380,23 +380,6 @@ class AppInsightsProperties(_model_base.Model): connection_string: str = rest_field(name="ConnectionString") """Authentication type of the connection target. Required.""" - @overload - def __init__( - self, - *, - connection_string: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - class InputData(_model_base.Model): """Abstract data class for input data configuration. @@ -1334,35 +1317,16 @@ class GetAppInsightsResponse(_model_base.Model): :ivar name: The name of the resource. Required. :vartype name: str :ivar properties: The properties of the resource. Required. - :vartype properties: ~azure.ai.projects.models.AppInsightsProperties + :vartype properties: ~azure.ai.projects.models._models.AppInsightsProperties """ id: str = rest_field() """A unique identifier for the resource. Required.""" name: str = rest_field() """The name of the resource. Required.""" - properties: "_models.AppInsightsProperties" = rest_field() + properties: "_models._models.AppInsightsProperties" = rest_field() """The properties of the resource. Required.""" - @overload - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - name: str, - properties: "_models.AppInsightsProperties", - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - class GetConnectionResponse(_model_base.Model): """Response from the listSecrets operation. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 3350db13b2bb..5cd8c91f51f4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,5 +1,6 @@ # pylint: disable=too-many-lines # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py index d6c4708ca4c0..64c4031e2bb6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/__init__.py @@ -14,7 +14,7 @@ from ._operations import AgentsOperations # type: ignore from ._operations import ConnectionsOperations # type: ignore -from ._operations import DiagnosticsOperations # type: ignore +from ._operations import TelemetryOperations # type: ignore from ._operations import EvaluationsOperations # type: ignore from ._patch import __all__ as _patch_all @@ -24,7 +24,7 @@ __all__ = [ "AgentsOperations", "ConnectionsOperations", - "DiagnosticsOperations", + "TelemetryOperations", "EvaluationsOperations", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index a4b0f7e54adc..b766745c08ba 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -1267,9 +1267,7 @@ def build_connections_get_connection_with_secrets_request( # pylint: disable=na return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_diagnostics_get_app_insights_request( # pylint: disable=name-too-long - app_insights_resource_url: str, **kwargs: Any -) -> HttpRequest: +def build_telemetry_get_app_insights_request(app_insights_resource_url: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -6677,14 +6675,14 @@ def _get_connection_with_secrets( return deserialized # type: ignore -class DiagnosticsOperations: +class TelemetryOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.ai.projects.AIProjectClient`'s - :attr:`diagnostics` attribute. + :attr:`telemetry` attribute. """ def __init__(self, *args, **kwargs): @@ -6695,7 +6693,9 @@ def __init__(self, *args, **kwargs): self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace - def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _models.GetAppInsightsResponse: + def _get_app_insights( + self, app_insights_resource_url: str, **kwargs: Any + ) -> _models._models.GetAppInsightsResponse: # pylint: disable=line-too-long """Gets the properties of the specified Application Insights resource. 
@@ -6705,7 +6705,7 @@ def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _mo Required. :type app_insights_resource_url: str :return: GetAppInsightsResponse. The GetAppInsightsResponse is compatible with MutableMapping - :rtype: ~azure.ai.projects.models.GetAppInsightsResponse + :rtype: ~azure.ai.projects.models._models.GetAppInsightsResponse :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6719,9 +6719,9 @@ def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _mo _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[_models.GetAppInsightsResponse] = kwargs.pop("cls", None) + cls: ClsType[_models._models.GetAppInsightsResponse] = kwargs.pop("cls", None) - _request = build_diagnostics_get_app_insights_request( + _request = build_telemetry_get_app_insights_request( app_insights_resource_url=app_insights_resource_url, api_version=self._config.api_version, headers=_headers, @@ -6756,7 +6756,9 @@ def get_app_insights(self, app_insights_resource_url: str, **kwargs: Any) -> _mo if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(_models.GetAppInsightsResponse, response.json()) + deserialized = _deserialize( + _models._models.GetAppInsightsResponse, response.json() # pylint: disable=protected-access + ) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index b2c8d6618411..1877267f6247 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -14,7 +15,7 @@ from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import DiagnosticsOperations as DiagnosticsOperationsGenerated +from ._operations import TelemetryOperations as TelemetryOperationsGenerated from ..models._enums import AuthenticationType, ConnectionType from ..models._models import ( GetConnectionResponse, @@ -314,8 +315,9 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) return connection_properties_list + # Internal helper function to enable tracing, used by both sync and async clients -def _enable_telemetry(destination: Union[TextIOWrapper, str] , **kwargs) -> None: +def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. :keyword destination: `sys.stdout` for tracing to console output, or a string holding the @@ -340,6 +342,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str] , **kwargs) -> None "OpenTelemetry package is not installed. 
Please install it using 'pip install opentelemetry-exporter-otlp-proto-http'"
 )
 from azure.core.settings import settings
+
 settings.tracing_implementation = "opentelemetry"
 trace.set_tracer_provider(TracerProvider())
 trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination)))
@@ -356,6 +359,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str] , **kwargs) -> None
 "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-sdk'"
 )
 from azure.core.settings import settings
+
 settings.tracing_implementation = "opentelemetry"
 trace.set_tracer_provider(TracerProvider())
 trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
@@ -367,27 +371,35 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str] , **kwargs) -> None
 # Silently try to load a set of relevant Instrumentors
 try:
 from azure.ai.inference.tracing import AIInferenceInstrumentor
+
 instrumentor = AIInferenceInstrumentor()
 if not instrumentor.is_instrumented():
 instrumentor.instrument()
 except ModuleNotFoundError as _:
- logger.warning("Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed")
+ logger.warning(
+ "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed"
+ )
 
 try:
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+
 OpenAIInstrumentor().instrument()
 except ModuleNotFoundError as _:
- logger.warning("Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai` is not installed")
+ logger.warning(
+ "Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai` is not installed"
+ )
 
 try:
 from opentelemetry.instrumentation.langchain import LangchainInstrumentor
+
 LangchainInstrumentor().instrument()
 except ModuleNotFoundError as _:
- logger.warning("Could not call LangchainInstrumentor().instrument()` since `opentelemetry-instrumentation-langchain` is not installed")
+ logger.warning(
+ "Could not call `LangchainInstrumentor().instrument()` since `opentelemetry-instrumentation-langchain` is not installed"
+ )
 
-# TODO: change this to TelemetryOperations
-class DiagnosticsOperations(DiagnosticsOperationsGenerated):
+class TelemetryOperations(TelemetryOperationsGenerated):
 
 _connection_string: Optional[str] = None
 _get_connection_string_called: bool = False
@@ -413,7 +425,7 @@ def get_connection_string(self) -> None:
 
 if get_workspace_response.properties.application_insights:
 # Make a GET call to the Application Insights resource URL to get the connection string
- app_insights_respose: GetAppInsightsResponse = self.get_app_insights(
+ app_insights_respose: GetAppInsightsResponse = self._get_app_insights(
 app_insights_resource_url=get_workspace_response.properties.application_insights
 )
@@ -422,10 +434,9 @@ def get_connection_string(self) -> None:
 self._get_connection_string_called = True
 return self._connection_string
 
- # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`?
- # TODO: This could be a class method. But we don't have a class property AIProjectClient.diagnostics
- def enable(self, *, destination: Union[TextIOWrapper, str] , **kwargs) -> None:
+ # TODO: This could be a class method.
But we don't have a class property AIProjectClient.telemetry + def enable(self, *, destination: Union[TextIOWrapper, str], **kwargs) -> None: """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. :keyword destination: `sys.stdout` for tracing to console output, or a string holding the @@ -2497,7 +2508,7 @@ def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str __all__: List[str] = [ "AgentsOperations", "ConnectionsOperations", - "DiagnosticsOperations", + "TelemetryOperations", "InferenceOperations", ] # Add all objects you want publicly available to users at this package level diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py index 69c794672e78..d50fed2268df 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py index 67bdf21fff42..4459a7b9e5c8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index 0aaa0458dd0f..d9cfe387163c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
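After this rename, tracing helpers hang off `project_client.telemetry` rather than `project_client.diagnostics`. A minimal sketch of the renamed surface (an illustration, not part of the patch; it assumes an Application Insights resource is attached to the AI Studio project):

    import os
    import sys
    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential

    project_client = AIProjectClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
    )

    # Formerly project_client.diagnostics.get_connection_string()
    app_insights_connection_string = project_client.telemetry.get_connection_string()

    # Trace to console output; passing an endpoint URL string instead
    # routes spans to an OpenTelemetry Protocol (OTLP) collector.
    project_client.telemetry.enable(destination=sys.stdout)

Note that `get_app_insights` becomes private (`_get_app_insights`) in the same change, so `get_connection_string()` is the supported way to obtain the Application Insights connection string.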
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index bbbdab530f1b..99afd0e762ce 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py index ab7799bb5c31..ad644d0259cb 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 15adab5c996a..22dc0f7ae75f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index d7f19653554c..6dc1693dca18 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py index 66b14bfc2af3..46e638eeb828 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index d829251a188f..d82bbaeb4d21 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index d929dfd05afc..cc8764b7e040 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py index 401b0403ea95..d7f40fa718d1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py index a684b367db44..f66e314abdaf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py index a59d4e322c6b..e7f7706090fb 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 713ceca95243..83d5eefef99d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index ed159e206a48..fab7a393bdd3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py index 814685dd1379..196ef4ee5581 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py index 3337f28954f2..dbec2b830e3f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_iteration_with_toolset.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index a07cadd45e8e..89e5f6a9c177 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index a318c2b51c4f..9710e3812f8c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 1f5dd56b4334..8c3591376d06 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects aiohttp azure-identity + pip install azure-ai-projects aiohttp azure-identity Set these environment variables with your own values: 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview" diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index bbc16950709a..8c6a1b527809 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set these environment variables with your own values: 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview" diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index 2cedf876e06a..5e43d7b04dae 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity aiohttp + pip install azure-ai-projects azure-identity aiohttp Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
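The connections samples above inspect the project's connected resources. A hedged sketch of the central call, inferred from the inference-operations code later in this patch series; the synchronous surface is assumed to mirror the async `connections.get_default` shown there, and `AZURE_AI_SERVICES` is one of the connection types it accepts:

    from azure.ai.projects.models import ConnectionType

    # Get the default connection of a given type, including its credentials.
    connection = project_client.connections.get_default(
        connection_type=ConnectionType.AZURE_AI_SERVICES,
        with_credentials=True,
    )
    print(connection.name, connection.endpoint_url, connection.authentication_type)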
diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index 8c4f2dda13e0..b5cb6919d14e 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects aiohttp azure-identity + pip install azure-ai-projects aiohttp azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py index 7de71c0a7c25..466c47466a36 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py @@ -13,10 +13,15 @@ Before running the sample: - pip install azure.ai.projects aiohttp openai_async + pip install azure-ai-projects aiohttp openai_async Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + + Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + + Update the model deployment name as needed. See `model=` below. """ import os import asyncio @@ -35,7 +40,7 @@ async def sample_get_azure_openai_client_async(): async with await project_client.inference.get_azure_openai_client() as client: response = await client.chat.completions.create( - model="gpt-4-0613", + model="gpt-35-turbo-16k", messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py index e14f1647b336..092a1c5bde71 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects aiohttp azure-identity + pip install azure-ai-projects aiohttp azure-identity Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
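The async Azure OpenAI sample above now documents the api-version and the model deployment name as values the reader must set. A minimal sketch of the updated call shape; both `api_version` and `model` below are illustrative values that must match your own Azure OpenAI resource, and passing `api_version` here is assumed to work the same way as in the synchronous sample later in this patch:

    async with await project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client:
        response = await client.chat.completions.create(
            model="gpt-35-turbo-16k",  # your model deployment name
            messages=[{"role": "user", "content": "How many feet are in a mile?"}],
        )
        print(response.choices[0].message.content)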
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py index 8552defe5bca..afb822298929 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py index 99f62c9bc457..21c0eedf5dc1 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py @@ -19,7 +19,8 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. """ import os from azure.ai.projects import AIProjectClient @@ -32,9 +33,8 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - # Enable Azure Monitor tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` - # for detailed logs, including chat request and response messages. - application_insights_connection_string = project_client.diagnostics.get_connection_string() + # Enable Azure Monitor tracing + application_insights_connection_string = project_client.telemetry.get_connection_string() if not application_insights_connection_string: print("Application Insights was not enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Studio project page.") diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index 0c9a405038f3..34164967b7f7 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -18,7 +18,8 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. 
Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. """ import os import sys @@ -31,19 +32,8 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - # Enable console tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` - # for detailed logs, including chat request and response messages. - project_client.diagnostics.enable(destination=sys.stdout) - """ - if not project_client.diagnostics.db_enable(destination=sys.stdout): - print("Application Insights was not enabled for this project.") - print("Enable it via the 'Tracing' tab under 'Tools', in your AI Studio project page.") - exit() - - print(f"Applications Insights connection string = {project_client.diagnostics.connection_string}") - - configure_azure_monitor(connection_string=project_client.diagnostics.application_insights.connection_string) - """ + # Enable console tracing + project_client.telemetry.enable(destination=sys.stdout) # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py index 5b5e794c2e1f..3194355c1ce7 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py @@ -13,10 +13,15 @@ Before running the sample: - pip install azure.ai.projects openai + pip install azure-ai-projects openai Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + + Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + + Update the model deployment name as needed. See `model=` below. """ import os from azure.ai.projects import AIProjectClient @@ -31,7 +36,7 @@ with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( - model="gpt-4-0613", + model="gpt-35-turbo-16k", messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py index 4c421acac2e9..8ca479b33b9b 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py @@ -19,7 +19,13 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. 
+ + Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + + Update the model deployment name as needed. See `model=` below. """ import os from azure.ai.projects import AIProjectClient @@ -31,9 +37,8 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - # Enable Azure Monitor tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` - # for detailed logs, including chat request and response messages. - application_insights_connection_string = project_client.diagnostics.get_connection_string() + # Enable Azure Monitor tracing + application_insights_connection_string = project_client.telemetry.get_connection_string() if not application_insights_connection_string: print("Application Insights was not enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Studio project page.") @@ -44,7 +49,7 @@ with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( - model="gpt-4-0613", + model="gpt-35-turbo-16k", messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py index df703286a343..3ec2cae585e9 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py @@ -17,8 +17,14 @@ pip install azure-ai-projects openai opentelemetry.instrumentation.openai opentelemetry-sdk opentelemetry-exporter-otlp-proto-http Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true - Optional. For detailed traces, including chat request and response messages. + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. + + Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: + https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + + Update the model deployment name as needed. See `model=` below. """ import os import sys @@ -30,15 +36,14 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) as project_client: - # Enable console tracing. Set environment variable `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true` - # for detailed logs, including chat request and response messages. 
- project_client.diagnostics.enable(destination=sys.stdout) + # Enable console tracing + project_client.telemetry.enable(destination=sys.stdout) # Get an authenticated OpenAI client for your default Azure OpenAI connection: with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( - model="gpt-4-0613", + model="gpt-35-turbo-16k", messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py index 05dfde82b25d..ac603217b399 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variable with your own value: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index e30a5d552d03..16ec88fac00c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py deleted file mode 100644 index c14a0b5ead80..000000000000 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics.py +++ /dev/null @@ -1,34 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import sys -from devtools_testutils import recorded_by_proxy -from azure.ai.projects import AIProjectClient -from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests - - -# The test class name needs to start with "Test" to get collected by pytest -class TestDiagnostics(DiagnosticsTestBase): - - @servicePreparerDiagnosticsTests() - @recorded_by_proxy - def test_diagnostics_get_connection_string(self, **kwargs): - with self.get_sync_client(**kwargs) as project_client: - connection_string = project_client.diagnostics.get_connection_string() - print(connection_string) - assert connection_string - assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) - assert connection_string == project_client.diagnostics.get_connection_string() - - @servicePreparerDiagnosticsTests() - def test_diagnostics_enable_console_tracing(self, **kwargs): - with self.get_sync_client(**kwargs) as project_client: - project_client.diagnostics.enable(destination=sys.stdout) - #TODO: Create inference client and do chat completions. How do I know if traces were emitted? 
- - @servicePreparerDiagnosticsTests() - def test_diagnostics_enable_otlp_tracing(self, **kwargs): - with self.get_sync_client(**kwargs) as project_client: - project_client.diagnostics.enable(destination="https://some.otlp.collector.endpoint") - #TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py b/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py deleted file mode 100644 index aca665503138..000000000000 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/test_diagnostics_async.py +++ /dev/null @@ -1,33 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -import sys -from devtools_testutils.aio import recorded_by_proxy_async -from diagnostics_test_base import DiagnosticsTestBase, servicePreparerDiagnosticsTests - - -# The test class name needs to start with "Test" to get collected by pytest -class TestDiagnosticsAsync(DiagnosticsTestBase): - - @servicePreparerDiagnosticsTests() - @recorded_by_proxy_async - async def test_diagnostics_get_connection_string_async(self, **kwargs): - async with self.get_async_client(**kwargs) as project_client: - connection_string = await project_client.diagnostics.get_connection_string() - print(connection_string) - assert connection_string - assert bool(DiagnosticsTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) - assert connection_string == await project_client.diagnostics.get_connection_string() - - @servicePreparerDiagnosticsTests() - async def test_diagnostics_enable_console_tracing_async(self, **kwargs): - async with self.get_async_client(**kwargs) as project_client: - project_client.diagnostics.enable(destination=sys.stdout) - #TODO: Create inference client and do chat completions. How do I know if traces were emitted? - - @servicePreparerDiagnosticsTests() - async def test_diagnostics_enable_otlp_tracing(self, **kwargs): - async with self.get_async_client(**kwargs) as project_client: - project_client.diagnostics.enable(destination="https://some.otlp.collector.endpoint") - #TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. 
\ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py similarity index 80% rename from sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py rename to sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py index a2bb72fb0254..c4ce9d8de412 100644 --- a/sdk/ai/azure-ai-projects/tests/diagnostics/diagnostics_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py @@ -14,10 +14,10 @@ Set these environment variables before running the test: set AZURE_AI_PROJECTS_DIAGNOSTICS_TEST_PROJECT_CONNECTION_STRING= """ -servicePreparerDiagnosticsTests = functools.partial( +servicePreparerTelemetryTests = functools.partial( EnvironmentVariableLoader, - "azure_ai_projects_diagnostics_test", - azure_ai_projects_diagnostics_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + "azure_ai_projects_telemetry_test", + azure_ai_projects_telemetry_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", ) # Set to True to enable SDK logging @@ -34,7 +34,7 @@ logger.addHandler(handler) -class DiagnosticsTestBase(AzureRecordedTestCase): +class TelemetryTestBase(AzureRecordedTestCase): # Regular expression describing the pattern of an Application Insights connection string. REGEX_APPINSIGHTS_CONNECTION_STRING = re.compile( @@ -42,7 +42,7 @@ class DiagnosticsTestBase(AzureRecordedTestCase): ) def get_sync_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("azure_ai_projects_diagnostics_tests_project_connection_string") + conn_str = kwargs.pop("azure_ai_projects_telemetry_tests_project_connection_string") project_client = AIProjectClient.from_connection_string( credential=self.get_credential(AIProjectClient, is_async=False), conn_str=conn_str, @@ -51,7 +51,7 @@ def get_sync_client(self, **kwargs) -> AIProjectClient: return project_client def get_async_client(self, **kwargs) -> AIProjectClient: - conn_str = kwargs.pop("azure_ai_projects_diagnostics_tests_project_connection_string") + conn_str = kwargs.pop("azure_ai_projects_telemetry_tests_project_connection_string") project_client = AIProjectClientAsync.from_connection_string( credential=self.get_credential(AIProjectClientAsync, is_async=False), conn_str=conn_str, diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py new file mode 100644 index 000000000000..a49d87f46469 --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py @@ -0,0 +1,34 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import sys +from devtools_testutils import recorded_by_proxy +from azure.ai.projects import AIProjectClient +from telemetry_test_base import TelemetryTestBase, servicePreparerTelemetryTests + + +# The test class name needs to start with "Test" to get collected by pytest +class TestTelemetry(TelemetryTestBase): + + @servicePreparerTelemetryTests() + @recorded_by_proxy + def test_telemetry_get_connection_string(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + connection_string = project_client.telemetry.get_connection_string() + print(connection_string) + assert connection_string + assert bool(TelemetryTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + assert connection_string == project_client.telemetry.get_connection_string() + + @servicePreparerTelemetryTests() + def test_telemetry_enable_console_tracing(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + project_client.telemetry.enable(destination=sys.stdout) + # TODO: Create inference client and do chat completions. How do I know if traces were emitted? + + @servicePreparerTelemetryTests() + def test_telemetry_enable_otlp_tracing(self, **kwargs): + with self.get_sync_client(**kwargs) as project_client: + project_client.telemetry.enable(destination="https://some.otlp.collector.endpoint") + # TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py new file mode 100644 index 000000000000..adadf9dc224d --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py @@ -0,0 +1,33 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import sys +from devtools_testutils.aio import recorded_by_proxy_async +from telemetry_test_base import TelemetryTestBase, servicePreparerTelemetryTests + + +# The test class name needs to start with "Test" to get collected by pytest +class TestTelemetryAsync(TelemetryTestBase): + + @servicePreparerTelemetryTests() + @recorded_by_proxy_async + async def test_telemetry_get_connection_string_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + connection_string = await project_client.telemetry.get_connection_string() + print(connection_string) + assert connection_string + assert bool(TelemetryTestBase.REGEX_APPINSIGHTS_CONNECTION_STRING.match(connection_string)) + assert connection_string == await project_client.telemetry.get_connection_string() + + @servicePreparerTelemetryTests() + async def test_telemetry_enable_console_tracing_async(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + project_client.telemetry.enable(destination=sys.stdout) + # TODO: Create inference client and do chat completions. How do I know if traces were emitted? + + @servicePreparerTelemetryTests() + async def test_telemetry_enable_otlp_tracing(self, **kwargs): + async with self.get_async_client(**kwargs) as project_client: + project_client.telemetry.enable(destination="https://some.otlp.collector.endpoint") + # TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. 
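The renamed telemetry operations exercised by these tests correspond to two patterns in application code: console tracing for local debugging, and Azure Monitor tracing via an Application Insights connection string. A minimal sketch combining both, assuming the `azure-monitor-opentelemetry` package used by the tracing samples earlier in this patch:

    import sys
    from azure.monitor.opentelemetry import configure_azure_monitor

    # Option 1: console tracing - spans are written to stdout.
    project_client.telemetry.enable(destination=sys.stdout)

    # Option 2: Azure Monitor tracing - requires Application Insights on the project.
    connection_string = project_client.telemetry.get_connection_string()
    if connection_string:
        configure_azure_monitor(connection_string=connection_string)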
diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index d74d92141c4c..47e0eca91f0a 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 5ec4b1fea1a0d5fb578bb7705a6dd61f39dcfea3 +commit: fc8aca891c34ecf74b4b2ca226f1dd4a84c8a6e3 repo: Azure/azure-rest-api-specs additionalDirectories: From e659c51dd10cdf70d55483e790ed27fd58fd3474 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Wed, 30 Oct 2024 14:19:52 -0800 Subject: [PATCH 065/138] Jhakulin/agents get file content async (#38201) * update to get file content * agents async updates * update * fix await/pylance issue * fix pylance error on AsyncToolSet * return empty set if no tool resources for code interpreter * added async toolset sample --- .../ai/projects/aio/operations/_patch.py | 132 +++++++++++++++++- .../azure/ai/projects/models/_patch.py | 2 + .../azure/ai/projects/operations/_patch.py | 75 ++++------ .../sample_agents_code_interpreter_async.py | 106 ++++++++++++++ .../sample_agents_run_with_toolset_async.py | 87 ++++++++++++ .../async_samples/user_async_functions.py | 9 +- .../agents/sample_agents_code_interpreter.py | 6 +- 7 files changed, 358 insertions(+), 59 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index a254c77a09f6..d80f83b53e21 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -9,10 +9,11 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ from ..._vendor import FileType -import sys +import sys, io, asyncio import logging import os import time +from pathlib import Path from io import TextIOWrapper from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload @@ -754,7 +755,7 @@ async def update_agent( instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, + toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -818,7 +819,7 @@ async def update_agent( if body is not _Unset: if isinstance(body, io.IOBase): return await super().update_agent(body=body, content_type=content_type, **kwargs) - return super().update_agent(body=body, **kwargs) + return await super().update_agent(body=body, **kwargs) if toolset is not None: self._toolset = toolset @@ -852,7 +853,7 @@ def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDe raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided") - def get_toolset(self) -> Optional[_models.AsyncToolSet]: + def _get_toolset(self) -> Optional[_models.AsyncToolSet]: """ Get the toolset for the agent. 
@@ -1254,7 +1255,7 @@ async def create_and_process_run( await self.cancel_run(thread_id=thread_id, run_id=run.id) break - toolset = self.get_toolset() + toolset = self._get_toolset() if toolset: tool_outputs = await toolset.execute_tool_calls(tool_calls) else: @@ -1801,7 +1802,7 @@ async def _handle_submit_tool_outputs( logger.debug("No tool calls to execute.") return - toolset = self.get_toolset() + toolset = self._get_toolset() if toolset: tool_outputs = await toolset.execute_tool_calls(tool_calls) else: @@ -2254,7 +2255,7 @@ async def create_vector_store_file_batch_and_poll( async def create_vector_store_file_batch_and_poll( self, vector_store_id: str, - body: Union[JSON, IO[bytes]] = None, + body: Union[JSON, IO[bytes], None] = None, *, file_ids: List[str] = _Unset, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, @@ -2295,6 +2296,123 @@ async def create_vector_store_file_batch_and_poll( return vector_store_file_batch + @distributed_trace_async + async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]: + """ + Asynchronously returns file content as a byte stream for the given file_id. + + :param file_id: The ID of the file to retrieve. Required. + :type file_id: str + :return: An async iterator that yields bytes from the file content. + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: If the HTTP request fails. + """ + kwargs["stream"] = True + response = await super()._get_file_content(file_id, **kwargs) + return cast(AsyncIterator[bytes], response) + + @distributed_trace_async + async def get_messages( + self, + thread_id: str, + *, + run_id: Optional[str] = None, + limit: Optional[int] = None, + order: Optional[Union[str, _models.ListSortOrder]] = None, + after: Optional[str] = None, + before: Optional[str] = None, + **kwargs: Any, + ) -> _models.ThreadMessages: + """Parses the OpenAIPageableListOfThreadMessage response and returns a ThreadMessages object. + + :param thread_id: Identifier of the thread. Required. + :type thread_id: str + :keyword run_id: Filter messages by the run ID that generated them. Default value is None. + :paramtype run_id: str + :keyword limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. Default value is None. + :paramtype limit: int + :keyword order: Sort order by the created_at timestamp of the objects. asc for ascending order + and desc for descending order. Known values are: "asc" and "desc". Default value is None. + :paramtype order: str or ~azure.ai.projects.models.ListSortOrder + :keyword after: A cursor for use in pagination. after is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the + list. Default value is None. + :paramtype after: str + :keyword before: A cursor for use in pagination. before is an object ID that defines your place + in the list. For instance, if you make a list request and receive 100 objects, ending with + obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of + the list. Default value is None. + :paramtype before: str + + :return: ThreadMessages. 
The ThreadMessages is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.ThreadMessages + """ + messages = await super().list_messages( + thread_id, run_id=run_id, limit=limit, order=order, after=after, before=before, **kwargs + ) + return _models.ThreadMessages(pageable_list=messages) + + @distributed_trace_async + async def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: + """ + Asynchronously saves file content retrieved using a file identifier to the specified local directory. + + :param file_id: The unique identifier for the file to retrieve. + :type file_id: str + :param file_name: The name of the file to be saved. + :type file_name: str + :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: str or Path + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. + """ + try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content + file_content_stream = await self.get_file_content(file_id) + if not file_content_stream: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + # Collect all chunks asynchronously + chunks = [] + async for chunk in file_content_stream: + if isinstance(chunk, (bytes, bytearray)): + chunks.append(chunk) + else: + raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") + + target_file_path = path / sanitized_file_name + + # Write the collected content to the file synchronously + def write_file(collected_chunks: list): + with open(target_file_path, "wb") as file: + for chunk in collected_chunks: + file.write(chunk) + + # Use the event loop to run the synchronous function in a thread executor + loop = asyncio.get_running_loop() + await loop.run_in_executor(None, write_file, chunks) + + logger.debug(f"File '{sanitized_file_name}' saved successfully at '{target_file_path}'.") + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error(f"An error occurred in save_file: {e}") + raise + __all__: List[str] = [ "AgentsOperations", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 5cd8c91f51f4..f84090ba6412 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -493,6 +493,8 @@ def resources(self) -> ToolResources: """ Get the code interpreter resources. 
""" + if not self.file_ids: + return ToolResources() return ToolResources(code_interpreter=CodeInterpreterToolResource(file_ids=list(self.file_ids))) def execute(self, tool_call: Any) -> Any: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 1877267f6247..b290c286c67e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -940,7 +940,7 @@ def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDe if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools): raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided") - def get_toolset(self) -> Optional[_models.ToolSet]: + def _get_toolset(self) -> Optional[_models.ToolSet]: """ Get the toolset for the agent. @@ -1342,7 +1342,7 @@ def create_and_process_run( self.cancel_run(thread_id=thread_id, run_id=run.id) break - toolset = self.get_toolset() + toolset = self._get_toolset() if toolset: tool_outputs = toolset.execute_tool_calls(tool_calls) else: @@ -1889,7 +1889,7 @@ def _handle_submit_tool_outputs( logger.debug("No tool calls to execute.") return - toolset = self.get_toolset() + toolset = self._get_toolset() if toolset: tool_outputs = toolset.execute_tool_calls(tool_calls) else: @@ -2444,64 +2444,49 @@ def get_messages( @distributed_trace def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None) -> None: """ - Saves file content retrieved using a file identifier to the specified local directory. + Synchronously saves file content retrieved using a file identifier to the specified local directory. :param file_id: The unique identifier for the file to retrieve. :type file_id: str :param file_name: The name of the file to be saved. :type file_name: str :param target_dir: The directory where the file should be saved. Defaults to the current working directory. - :type target_dir: Union[str, Path] + :raises ValueError: If the target path is not a directory or the file name is invalid. + :raises RuntimeError: If file content retrieval fails or no content is found. + :raises TypeError: If retrieved chunks are not bytes-like objects. + :raises IOError: If writing to the file fails. """ - # Determine target directory - path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() - logger.debug(f"Using target directory: {path}") - - if not path.exists(): - logger.debug(f"Creating non-existent target directory: {path}") - path.mkdir(parents=True, exist_ok=True) - elif not path.is_dir(): - error_msg = f"The target path '{path}' is not a directory." - logger.error(error_msg) - raise ValueError(error_msg) - - # Ensure file_name is properly sanitized - file_name = Path(file_name).name - if not file_name: - error_msg = "The provided file name is invalid." 
- logger.error(error_msg) - raise ValueError(error_msg) - - # Get file content try: + # Determine and validate the target directory + path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd() + path.mkdir(parents=True, exist_ok=True) + if not path.is_dir(): + raise ValueError(f"The target path '{path}' is not a directory.") + + # Sanitize and validate the file name + sanitized_file_name = Path(file_name).name + if not sanitized_file_name: + raise ValueError("The provided file name is invalid.") + + # Retrieve the file content file_content_stream = self.get_file_content(file_id) if not file_content_stream: - error_msg = f"No content retrievable for file ID '{file_id}'." - logger.error(error_msg) - raise RuntimeError(error_msg) - except Exception as e: - error_msg = f"Failed to retrieve file content for file ID '{file_id}': {e}" - logger.error(error_msg) - raise RuntimeError(error_msg) from e - - # Path to save the file - target_file_path = path / file_name - - # Write file content directly from the generator, ensuring each chunk is bytes - try: + raise RuntimeError(f"No content retrievable for file ID '{file_id}'.") + + target_file_path = path / sanitized_file_name + + # Write the file content to disk with target_file_path.open("wb") as file: for chunk in file_content_stream: if isinstance(chunk, (bytes, bytearray)): file.write(chunk) else: raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - logger.debug(f"File '{file_name}' saved successfully at '{target_file_path}'.") - except TypeError as e: - logger.error(f"Failed due to unexpected chunk type: {e}") - raise - except IOError as e: - error_msg = f"Failed to write to file '{target_file_path}': {e}" - logger.error(error_msg) + + logger.debug(f"File '{sanitized_file_name}' saved successfully at '{target_file_path}'.") + + except (ValueError, RuntimeError, TypeError, IOError) as e: + logger.error(f"An error occurred in save_file: {e}") raise diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py new file mode 100644 index 000000000000..fd1c27ff25da --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -0,0 +1,106 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_code_interpreter_async.py + +DESCRIPTION: + This sample demonstrates how to use code interpreter tool with agent from + the Azure Agents service using a asynchronous client. + +USAGE: + python sample_agents_code_interpreter_async.py + + Before running the sample: + + pip install azure.ai.projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" +import asyncio + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import CodeInterpreterTool +from azure.ai.projects.models import FilePurpose +from azure.identity import DefaultAzureCredential +from pathlib import Path + +import os + + +async def main(): + + # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
+ # At the moment, it should be in the format ";;;" + # Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) + + async with project_client: + # upload a file and wait for it to be processed + file = await project_client.agents.upload_file_and_poll( + file_path="../nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS + ) + print(f"Uploaded file, file ID: {file.id}") + + code_interpreter = CodeInterpreterTool(file_ids=[file.id]) + + agent = await project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + tool_resources=code_interpreter.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = await project_client.agents.create_message( + thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?" + ) + print(f"Created message, message ID: {message.id}") + + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + messages = await project_client.agents.get_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_sender("assistant") + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + await project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: {file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + file_name = Path(file_path_annotation.text).name + await project_client.agents.save_file(file_id=file_path_annotation.file_path.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + await project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py new file mode 100644 index 000000000000..36664ed72a04 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -0,0 +1,87 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +FILE: sample_agents_run_with_toolset_async.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with toolset from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_run_with_toolset_async.py + + Before running the sample: + + pip install azure.ai.projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os, asyncio +from azure.ai.projects.aio import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import AsyncFunctionTool, AsyncToolSet, CodeInterpreterTool +from user_async_functions import user_async_functions + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + +async def main(): + + project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], + ) + + async with project_client: + + # Initialize agent toolset with user functions and code interpreter + functions = AsyncFunctionTool(user_async_functions) + code_interpreter = CodeInterpreterTool() + + toolset = AsyncToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + + agent = await project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = await project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = await project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + await project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = await project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py index 4931352e03c6..8be8ca439e36 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py @@ -6,6 +6,7 @@ import asyncio import os import sys +from typing import Any, Callable, Set # Add parent directory to sys.path to import user_functions @@ -22,8 +23,8 @@ async def send_email_async(recipient: str, subject: str, body: str) -> str: # Statically defined user functions for fast reference with send_email as async but the rest as sync -user_async_functions = { - "fetch_current_datetime": 
fetch_current_datetime, - "fetch_weather": fetch_weather, - "send_email": send_email_async, +user_async_functions: Set[Callable[..., Any]] = { + fetch_current_datetime, + fetch_weather, + send_email_async, } diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index d82bbaeb4d21..4b02174acdef 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -25,7 +25,6 @@ from azure.ai.projects import AIProjectClient from azure.ai.projects.models import CodeInterpreterTool from azure.ai.projects.models import FilePurpose -from azure.ai.projects.models import MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation from azure.identity import DefaultAzureCredential from pathlib import Path @@ -87,8 +86,9 @@ for image_content in messages.image_contents: print(f"Image File ID: {image_content.image_file.file_id}") - project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name="image_file.png") - print(f"Saved image file to: {Path.cwd() / 'image_file.png'}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") for file_path_annotation in messages.file_path_annotations: print(f"File Paths:") From 5b4b1b9c80a791287de183bcde83cabb9e3a78d8 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Thu, 31 Oct 2024 09:00:58 -0700 Subject: [PATCH 066/138] Fixed test (#38222) --- .../tests/agents/test_agents_client.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 16ec88fac00c..fb10f2f007f6 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -78,7 +77,7 @@ def fetch_current_datetime_live(): # create tool for agent use -def fetch_current_datetime_recordings(): +def fetch_current_datetime(): """ Get the current time as a JSON string. 
@@ -89,8 +88,8 @@ def fetch_current_datetime_recordings(): return time_json -# Statically defined user functions for fast reference -user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings} +# create function for agent use +user_functions = {fetch_current_datetime} # The test class name needs to start with "Test" to get collected by pytest @@ -185,7 +184,7 @@ def test_create_agent_with_tools(self, **kwargs): assert isinstance(client, AIProjectClient) # initialize agent functions - functions = FunctionTool(functions=user_functions_recording) + functions = FunctionTool(functions=user_functions) # create agent with tools agent = client.agents.create_agent( @@ -194,7 +193,7 @@ def test_create_agent_with_tools(self, **kwargs): assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert agent.tools[0] == functions.definitions + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete agent and close client @@ -211,7 +210,7 @@ def test_create_agent_with_tools_and_resources(self, **kwargs): assert isinstance(client, AIProjectClient) # initialize agent functions - functions = FunctionTool(functions=user_functions_recording) + functions = FunctionTool(functions=user_functions) # create agent with tools agent = client.agents.create_agent( @@ -808,7 +807,7 @@ def test_submit_tool_outputs_to_run(self, **kwargs): assert isinstance(client, AIProjectClient) # Initialize agent tools - functions = FunctionTool(user_functions_recording) + functions = FunctionTool(functions=user_functions) code_interpreter = CodeInterpreterTool() toolset = ToolSet() From 5445d7405e59a86d1ac42dee1af039e95e78e1a4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 31 Oct 2024 09:37:12 -0700 Subject: [PATCH 067/138] Support the new unified models deployed to the AIServices connection --- .../ai/projects/aio/operations/_patch.py | 94 ++++++++++++------ .../azure/ai/projects/models/_enums.py | 8 +- .../azure/ai/projects/operations/_patch.py | 97 +++++++++++++------ .../azure_ai_projects_tests.env | 6 +- .../async_samples/sample_connections_async.py | 24 +++-- .../samples/connections/sample_connections.py | 27 ++++-- ...ns_with_azure_ai_inference_client_async.py | 13 ++- ...pletions_with_azure_openai_client_async.py | 16 +-- ...gs_with_azure_ai_inference_client_async.py | 12 ++- ...pletions_with_azure_ai_inference_client.py | 14 ++- ...erence_client_and_azure_monitor_tracing.py | 8 +- ...ai_inference_client_and_console_tracing.py | 10 +- ...at_completions_with_azure_openai_client.py | 15 +-- ...openai_client_and_azure_monitor_tracing.py | 10 +- ...azure_openai_client_and_console_tracing.py | 10 +- ...beddings_with_azure_ai_inference_client.py | 12 ++- .../tests/connections/test_connections.py | 18 ++++ .../connections/test_connections_async.py | 20 +++- 18 files changed, 288 insertions(+), 126 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index d80f83b53e21..388383c20eff 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -14,6 +14,7 @@ import os import time from pathlib import Path +from azure.core.exceptions import ResourceNotFoundError from io import TextIOWrapper from typing 
import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload @@ -47,19 +48,31 @@ def __init__(self, outer_instance): @distributed_trace_async async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. - The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed + in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.ChatCompletionsClient :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No serverless connection found") + + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # a separate "Serverless" connection. This is now deprecated. + use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + + if use_serverless_connection: + connection = await self._outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + ) + if not connection: + return None + else: + connection = await self._outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + ) + if not connection: + return None try: from azure.ai.inference.aio import ChatCompletionsClient @@ -68,6 +81,11 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) + if use_serverless_connection: + endpoint = connection.endpoint_url + else: + endpoint = f"https://{connection.name}.services.ai.azure.com/models" + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" @@ -75,7 +93,7 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) + endpoint=endpoint, credential=AzureKeyCredential(connection.key) ) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth @@ -83,14 +101,14 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential + endpoint=endpoint, credential=connection.properties.token_credential ) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. 
logger.debug(
                "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
            )
-            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+            client = ChatCompletionsClient(endpoint=endpoint, credential=connection.token_credential)
         else:
             raise ValueError("Unknown authentication type")
 
@@ -99,19 +117,31 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient"
     @distributed_trace_async
     async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
         """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
-        The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
+        Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed
+        in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method.
 
         :return: An authenticated embeddings client
         :rtype: ~azure.ai.inference.models.EmbeddingsClient
         :raises ~azure.core.exceptions.HttpResponseError:
         """
         kwargs.setdefault("merge_span", True)
-        connection = await self._outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
+
+        # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on
+        # a separate "Serverless" connection. This is now deprecated.
+        use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true")
+
+        if use_serverless_connection:
+            connection = await self._outer_instance.connections.get_default(
+                connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
+            )
+            if not connection:
+                return None
+        else:
+            connection = await self._outer_instance.connections.get_default(
+                connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs
+            )
+            if not connection:
+                return None
 
         try:
             from azure.ai.inference.aio import EmbeddingsClient
@@ -120,27 +150,32 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
                 "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'"
             )
 
+        if use_serverless_connection:
+            endpoint = connection.endpoint_url
+        else:
+            endpoint = f"https://{connection.name}.services.ai.azure.com/models"
+
         if connection.authentication_type == AuthenticationType.API_KEY:
             logger.debug(
                 "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication"
             )
             from azure.core.credentials import AzureKeyCredential
 
-            client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key))
+            client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key))
         elif connection.authentication_type == AuthenticationType.AAD:
             # MaaS models do not yet support EntraID auth
             logger.debug(
                 "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication"
             )
             client = EmbeddingsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+                endpoint=endpoint, credential=connection.properties.token_credential
             )
         elif connection.authentication_type == AuthenticationType.SAS:
             # TODO - Not yet supported by the service.
Expected 9/27.
             logger.debug(
                 "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication"
             )
-            client = EmbeddingsClient(endpoint=connection.connection_url, credential=connection.token_credential)
+            client = EmbeddingsClient(endpoint=endpoint, credential=connection.token_credential)
         else:
             raise ValueError("Unknown authentication type")
 
@@ -225,7 +260,7 @@ async def get_default(
         :type connection_type: ~azure.ai.projects.models._models.ConnectionType
         :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
         :type with_credentials: bool
-        :return: The connection properties
+        :return: The connection properties, or `None` if there are no connections of the specified type.
         :rtype: ~azure.ai.projects.models._models.ConnectionProperties
         :raises ~azure.core.exceptions.HttpResponseError:
         """
@@ -254,7 +289,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k
         :type connection_name: str
         :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional.
         :type with_credentials: bool
-        :return: The connection properties
+        :return: The connection properties, or `None` if a connection with this name does not exist.
         :rtype: ~azure.ai.projects.models._models.ConnectionProperties
         :raises ~azure.core.exceptions.HttpResponseError:
         """
@@ -262,9 +297,12 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k
         if not connection_name:
             raise ValueError("Connection name cannot be empty")
         if with_credentials:
-            connection: GetConnectionResponse = await self._get_connection_with_secrets(
-                connection_name=connection_name, ignored="ignore", **kwargs
-            )
+            try:
+                connection: GetConnectionResponse = await self._get_connection_with_secrets(
+                    connection_name=connection_name, ignored="ignore", **kwargs
+                )
+            except ResourceNotFoundError as _:
+                return None
             if connection.properties.auth_type == AuthenticationType.AAD:
                 return ConnectionProperties(connection=connection, token_credential=self._config.credential)
             elif connection.properties.auth_type == AuthenticationType.SAS:
@@ -282,9 +320,11 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k
 
             return ConnectionProperties(connection=connection)
         else:
-            return ConnectionProperties(
+            try:
                 connection=await self._get_connection(connection_name=connection_name, **kwargs)
-            )
+            except ResourceNotFoundError as _:
+                return None
+            return ConnectionProperties(connection=connection)
 
     @distributed_trace_async
     async def list(
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
index 7ca731b7639b..afd4db01c895 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
@@ -147,13 +147,15 @@ class ConnectionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
     """The Type (or category) of the connection."""
 
     AZURE_OPEN_AI = "AzureOpenAI"
-    """Azure OpenAI service"""
+    """Azure OpenAI Service"""
     SERVERLESS = "Serverless"
-    """Serverless API service"""
+    """Serverless API Service"""
     AZURE_BLOB_STORAGE = "AzureBlob"
     """Azure Blob Storage"""
-    AI_SERVICES = "AIServices"
+    AZURE_AI_SERVICES = "AIServices"
     """Azure AI Services"""
+    AZURE_AI_SEARCH = "CognitiveSearch"
+    """Azure AI Search"""
 
 
 class DoneEvent(str, Enum, metaclass=CaseInsensitiveEnumMeta):
diff --git
a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index b290c286c67e..b16a17949153 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -9,6 +8,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ import sys, io, logging, os, time +from azure.core.exceptions import ResourceNotFoundError from io import IOBase, TextIOWrapper from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast from pathlib import Path @@ -54,19 +54,31 @@ def __init__(self, outer_instance): @distributed_trace def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default - Serverless connection. The Serverless connection must have a Chat Completions AI model deployment. - The package `azure-ai-inference` must be installed prior to calling this method. + Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed + in this resource. The package `azure-ai-inference` must be installed prior to calling this method. - :return: An authenticated chat completions client + :return: An authenticated chat completions client, or `None` if no Azure AI Services connection is found. :rtype: ~azure.ai.inference.models.ChatCompletionsClient :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) - connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs - ) - if not connection: - raise ValueError("No serverless connection found") + + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # a separate "Serverless" connection. This is now deprecated. + use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + + if use_serverless_connection: + connection = self._outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + ) + if not connection: + return None + else: + connection = self._outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + ) + if not connection: + return None try: from azure.ai.inference import ChatCompletionsClient @@ -75,6 +87,11 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'"
            )
 
+        if use_serverless_connection:
+            endpoint = connection.endpoint_url
+        else:
+            endpoint = f"https://{connection.name}.services.ai.azure.com/models"
+
         if connection.authentication_type == AuthenticationType.API_KEY:
             logger.debug(
                 "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication"
@@ -82,7 +99,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
             from azure.core.credentials import AzureKeyCredential
 
             client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)
+                endpoint=endpoint, credential=AzureKeyCredential(connection.key)
             )
         elif connection.authentication_type == AuthenticationType.AAD:
             # MaaS models do not yet support EntraID auth
@@ -90,14 +107,14 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
                 "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication"
             )
             client = ChatCompletionsClient(
-                endpoint=connection.endpoint_url, credential=connection.properties.token_credential
+                endpoint=endpoint, credential=connection.properties.token_credential
             )
         elif connection.authentication_type == AuthenticationType.SAS:
             # TODO - Not yet supported by the service. Expected 9/27.
             logger.debug(
                 "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication"
             )
-            client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=connection.token_credential)
+            client = ChatCompletionsClient(endpoint=endpoint, credential=connection.token_credential)
         else:
             raise ValueError("Unknown authentication type")
 
@@ -106,19 +123,31 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient":
     @distributed_trace
     def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient":
         """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default
-        Serverless connection. The Serverless connection must have a Text Embeddings AI model deployment.
-        The package `azure-ai-inference` must be installed prior to calling this method.
+        Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed
+        in this resource. The package `azure-ai-inference` must be installed prior to calling this method.
 
         :return: An authenticated embeddings client
         :rtype: ~azure.ai.inference.models.EmbeddingsClient
         :raises ~azure.core.exceptions.HttpResponseError:
         """
         kwargs.setdefault("merge_span", True)
-        connection = self._outer_instance.connections.get_default(
-            connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs
-        )
-        if not connection:
-            raise ValueError("No serverless connection found")
+
+        # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on
+        # a separate "Serverless" connection. This is now deprecated.
+ use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + + if use_serverless_connection: + connection = self._outer_instance.connections.get_default( + connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + ) + if not connection: + return None + else: + connection = self._outer_instance.connections.get_default( + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + ) + if not connection: + return None try: from azure.ai.inference import EmbeddingsClient @@ -127,27 +156,32 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" ) + if use_serverless_connection: + endpoint = connection.endpoint_url + else: + endpoint = f"https://{connection.name}.services.ai.azure.com/models" + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential - client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) client = EmbeddingsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential + endpoint=endpoint, credential=connection.properties.token_credential ) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) - client = EmbeddingsClient(endpoint=connection.endpoint_url, credential=connection.token_credential) + client = EmbeddingsClient(endpoint=endpoint, credential=connection.token_credential) else: raise ValueError("Unknown authentication type") @@ -233,7 +267,7 @@ def get_default( :type connection_type: ~azure.ai.projects.models._models.ConnectionType :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool - :return: The connection properties + :return: The connection properties, or `None` if there are no connections of the specified type. :rtype: ~azure.ai.projects.models._models.ConnectionProperties :raises ~azure.core.exceptions.HttpResponseError: """ @@ -262,7 +296,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: :type connection_name: str :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool - :return: The connection properties + :return: The connection properties, or `None` if a connection with this name does not exist. 
:rtype: ~azure.ai.projects.models._models.ConnectionProperties :raises ~azure.core.exceptions.HttpResponseError: """ @@ -270,9 +304,12 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: if not connection_name: raise ValueError("Connection name cannot be empty") if with_credentials: - connection: GetConnectionResponse = self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) + try: + connection: GetConnectionResponse = self._get_connection_with_secrets( + connection_name=connection_name, ignored="ignore", **kwargs + ) + except ResourceNotFoundError as _: + return None if connection.properties.auth_type == AuthenticationType.AAD: return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: @@ -290,7 +327,11 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: return ConnectionProperties(connection=connection) else: - return ConnectionProperties(connection=self._get_connection(connection_name=connection_name, **kwargs)) + try: + connection = self._get_connection(connection_name=connection_name, **kwargs) + except ResourceNotFoundError as _: + return None + return ConnectionProperties(connection=connection) @distributed_trace def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) -> Iterable[ConnectionProperties]: diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env index 9b887cfc344d..c2cf5fdd5914 100644 --- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env @@ -16,9 +16,11 @@ AZURE_AI_PROJECTS_CONNECTIONS_TESTS_SERVERLESS_CONNECTION_NAME= # To run Inference tests: AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} AZURE_AI_PROJECTS_INFERENCE_TESTS_MODEL_DEPLOYMENT_NAME= +# This will be removed soon +USE_SERVERLESS_CONNECTION=true -# To run Diagnostics tests: -AZURE_AI_PROJECTS_DIAGNOSTICS_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} +# To run Telemetry tests: +AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} # To run Agents tests: diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 8c3591376d06..124eedeb8211 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -5,8 +5,9 @@ """ DESCRIPTION: - Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections - and get connections properties. + Given an asynchronous AIProjectClient, this sample demonstrates how to enumerate connections, + get connection properties, and create a chat completions client using the connection + properties. USAGE: python sample_connections_async.py @@ -20,6 +21,7 @@ tab in your AI Studio Project page. 2) CONNECTION_NAME - the name of a Serverless or Azure OpenAI connection, as found in the "Connections" tab in your AI Studio Hub page. + 3) MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
""" import asyncio @@ -31,9 +33,13 @@ async def sample_connections_async(): + project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] + connection_name = os.environ["CONNECTION_NAME"] + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) async with project_client: @@ -54,15 +60,15 @@ async def sample_connections_async(): # Get the properties of the default connection of a particular "type", with credentials connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, # Optional. Defaults to "False" ) - print("====> Get default Azure Open AI connection:") + print("====> Get default Azure AI Services connection:") print(connection) # Get the properties of a connection by connection name: connection = await project_client.connections.get( - connection_name=os.environ["CONNECTION_NAME"], + connection_name=connection_name, with_credentials=True, # Optional. Defaults to "False" ) print("====> Get connection by name:") @@ -96,7 +102,7 @@ async def sample_connections_async(): raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = await client.chat.completions.create( - model="gpt-4o", + model=model_deployment_name, messages=[ { "role": "user", @@ -106,7 +112,7 @@ async def sample_connections_async(): ) print(response.choices[0].message.content) - elif connection.connection_type == ConnectionType.SERVERLESS: + elif connection.connection_type == ConnectionType.AZURE_AI_SERVICES: from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import UserMessage @@ -127,7 +133,7 @@ async def sample_connections_async(): else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = await client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) await client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 8c6a1b527809..fd4b68cf38e2 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -5,8 +5,9 @@ """ DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to enumerate connections - and get connection properties. + Given an AIProjectClient, this sample demonstrates how to enumerate connections, + get connection properties, and create a chat completions client using the connection + properties. USAGE: python sample_connections.py @@ -16,10 +17,11 @@ pip install azure-ai-projects azure-identity Set these environment variables with your own values: - 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview" + 1) PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in the "Project overview" tab in your AI Studio Project page. 
- 2) CONNECTION_NAME - the name of a Serverless or Azure OpenAI connection, as found in the "Connections" tab + 2) CONNECTION_NAME - The name of a Serverless or Azure OpenAI connection, as found in the "Connections" tab in your AI Studio Hub page. + 3) MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. """ import os @@ -31,12 +33,17 @@ from azure.identity import DefaultAzureCredential, get_bearer_token_provider from azure.core.credentials import AzureKeyCredential +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +connection_name = os.environ["CONNECTION_NAME"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) with project_client: + # List the properties of all connections connections = project_client.connections.list() print(f"====> Listing of all connections (found {len(connections)}):") @@ -53,15 +60,15 @@ # Get the properties of the default connection of a particular "type", with credentials connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, # Optional. Defaults to "False" ) - print("====> Get default Azure Open AI connection:") + print("====> Get default Azure AI Services connection:") print(connection) # Get the properties of a connection by connection name: connection = project_client.connections.get( - connection_name=os.environ["CONNECTION_NAME"], with_credentials=True # Optional. Defaults to "False" + connection_name=connection_name, with_credentials=True # Optional. Defaults to "False" ) print("====> Get connection by name:") print(connection) @@ -91,7 +98,7 @@ raise ValueError(f"Authentication type {connection.authentication_type} not supported.") response = client.chat.completions.create( - model="gpt-4o", + model=model_deployment_name, messages=[ { "role": "user", @@ -116,6 +123,6 @@ else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index b5cb6919d14e..16144f785905 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -16,8 +16,9 @@ pip install azure-ai-projects aiohttp azure-identity - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
""" import os import asyncio @@ -28,15 +29,17 @@ async def sample_get_chat_completions_client_async(): + project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + async with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: - # Get an authenticated async ChatCompletionsClient (from azure.ai.inference) for your default Serverless connection: async with await project_client.inference.get_chat_completions_client() as client: - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = await client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py index 466c47466a36..5c5253179e7a 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py @@ -15,13 +15,12 @@ pip install azure-ai-projects aiohttp openai_async - Set this environment variable with your own value: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - - Update the model deployment name as needed. See `model=` below. 
""" import os import asyncio @@ -31,16 +30,19 @@ async def sample_get_azure_openai_client_async(): + project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + async with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Get an authenticated AsyncAzureOpenAI client for your default Azure OpenAI connection: - async with await project_client.inference.get_azure_openai_client() as client: + async with await project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = await client.chat.completions.create( - model="gpt-35-turbo-16k", + model=model_deployment_name, messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py index 092a1c5bde71..474d1a76ecff 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py @@ -16,8 +16,9 @@ pip install azure-ai-projects aiohttp azure-identity - Set this environment variable with your own value: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. """ import asyncio import os @@ -27,15 +28,18 @@ async def sample_get_embeddings_client_async(): + project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] + model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + async with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Get an authenticated async azure.ai.inference embeddings client for your default Serverless connection: async with await project_client.inference.get_embeddings_client() as client: - response = await client.embed(input=["first phrase", "second phrase", "third phrase"]) + response = await client.embed(model=model_deployment_name, input=["first phrase", "second phrase", "third phrase"]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py index afb822298929..693aeef4706d 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py @@ -17,22 +17,26 @@ pip install azure-ai-projects azure-identity - Set this environment variables with your own values: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+ Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. """ + import os from azure.ai.projects import AIProjectClient from azure.ai.inference.models import UserMessage from azure.identity import DefaultAzureCredential +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: - # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py index 21c0eedf5dc1..6e26adea5817 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py @@ -19,6 +19,7 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. 
""" @@ -28,9 +29,12 @@ from azure.identity import DefaultAzureCredential from azure.monitor.opentelemetry import configure_azure_monitor +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Enable Azure Monitor tracing @@ -44,6 +48,6 @@ # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index 34164967b7f7..9c86bc51a717 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -17,7 +17,8 @@ pip install azure-ai-projects azure-ai-inference azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http Set these environment variables with your own values: - * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. 
""" @@ -27,9 +28,12 @@ from azure.ai.inference.models import UserMessage from azure.identity import DefaultAzureCredential +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Enable console tracing @@ -38,6 +42,6 @@ # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: - response = client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py index 3194355c1ce7..10c66051205d 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client.py @@ -15,28 +15,29 @@ pip install azure-ai-projects openai - Set this environment variable with your own value: - PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - - Update the model deployment name as needed. See `model=` below. 
""" import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: - # Get an authenticated OpenAI client for your default Azure OpenAI connection: with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( - model="gpt-35-turbo-16k", + model=model_deployment_name, messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py index 8ca479b33b9b..f9c49842ed93 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py @@ -19,22 +19,24 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - - Update the model deployment name as needed. See `model=` below. """ import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from azure.monitor.opentelemetry import configure_azure_monitor +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Enable Azure Monitor tracing @@ -49,7 +51,7 @@ with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client: response = client.chat.completions.create( - model="gpt-35-turbo-16k", + model=model_deployment_name, messages=[ { "role": "user", diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py index 3ec2cae585e9..77d395ce63f0 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py @@ -18,22 +18,24 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
* AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
      messages, which may contain personal data. False by default.
 
    Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here:
    https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
-
-    Update the model deployment name as needed. See `model=` below.
 """
 import os
 import sys
 from azure.ai.projects import AIProjectClient
 from azure.identity import DefaultAzureCredential
 
+project_connection_string = os.environ["PROJECT_CONNECTION_STRING"]
+model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"]
+
 with AIProjectClient.from_connection_string(
     credential=DefaultAzureCredential(),
-    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+    conn_str=project_connection_string,
 ) as project_client:
 
     # Enable console tracing
@@ -43,7 +45,7 @@
 
     with project_client.inference.get_azure_openai_client(api_version="2024-06-01") as client:
 
         response = client.chat.completions.create(
-            model="gpt-35-turbo-16k",
+            model=model_deployment_name,
             messages=[
                 {
                     "role": "user",
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py
index ac603217b399..0588554f071f 100644
--- a/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_text_embeddings_with_azure_ai_inference_client.py
@@ -16,22 +16,26 @@
 
     pip install azure-ai-projects azure-identity
 
-    Set this environment variable with your own value:
-    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+    * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project.
""" import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential +project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] +model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] + with AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], + conn_str=project_connection_string, ) as project_client: # Get an authenticated azure.ai.inference embeddings client for your default Serverless connection: with project_client.inference.get_embeddings_client() as client: - response = client.embed(input=["first phrase", "second phrase", "third phrase"]) + response = client.embed(model=model_deployment_name, input=["first phrase", "second phrase", "third phrase"]) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 9cc5a7d99121..470d32779f33 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -20,6 +20,16 @@ def test_connections_get(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: + assert project_client.connections.get( + connection_name="Some non-existing name", with_credentials=False + ) == None + + assert project_client.connections.get( + connection_name="Some non-existing name", with_credentials=True + ) == None + + return + connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) ConnectionsTestBase.validate_connection( @@ -67,6 +77,14 @@ def test_connections_get_default(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: + assert project_client.connections.get_default( + connection_type="Some unrecognized type", with_credentials=False + ) == None + + assert project_client.connections.get_default( + connection_type="Some unrecognized type", with_credentials=True + ) == None + connection = project_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False ) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 9c21ee95c2d5..7246ccefbfab 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -13,12 +13,20 @@ class TestConnectionsAsync(ConnectionsTestBase): @servicePreparerConnectionsTests() @recorded_by_proxy_async - async def test_connections_get(self, **kwargs): + async def test_connections_get_async(self, **kwargs): aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_serverless_connection_name") async with self.get_async_client(**kwargs) as project_client: + assert await project_client.connections.get( + connection_name="Some non-existing name", with_credentials=False + ) == None + + assert await project_client.connections.get( + connection_name="Some non-existing name", with_credentials=True + ) == None + connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) ConnectionsTestBase.validate_connection( @@ -61,7 +69,7 @@ async def test_connections_get(self, **kwargs): 
@servicePreparerConnectionsTests()
     @recorded_by_proxy_async
-    async def test_connections_get_default(self, **kwargs):
+    async def test_connections_get_default_async(self, **kwargs):
 
         default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name")
         default_serverless_connection = kwargs.pop(
@@ -70,6 +78,14 @@ async def test_connections_get_default(self, **kwargs):
 
         async with self.get_async_client(**kwargs) as project_client:
 
+            assert await project_client.connections.get_default(
+                connection_type="Some unrecognized type", with_credentials=False
+            ) == None
+
+            assert await project_client.connections.get_default(
+                connection_type="Some unrecognized type", with_credentials=True
+            ) == None
+
             connection = await project_client.connections.get_default(
                 connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False
             )

From 9f9b12f622eb10828f59c8aef5e82e7dc44d96f7 Mon Sep 17 00:00:00 2001
From: Howie Leung
Date: Thu, 31 Oct 2024 13:09:05 -0700
Subject: [PATCH 068/138] Fixed validation to prevent putting AsyncFunctionTool into ToolSet and FunctionTool into AsyncToolSet. Introduce BaseToolSet. (#38244)

---
 .../azure/ai/projects/models/_patch.py | 77 +++++++++++--------
 1 file changed, 45 insertions(+), 32 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
index f84090ba6412..e7cd6b5ad9e0 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -289,9 +289,8 @@ def execute(self, tool_call: Any) -> Any:
         :return: The output of the tool operations.
         """
         pass
-
-
-class FunctionTool(Tool):
+
+class BaseFunctionTool(Tool):
     """
     A tool that executes user-defined functions.
     """
@@ -354,15 +353,6 @@ def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any,
 
         return function, parsed_arguments
 
-    def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
-        function, parsed_arguments = self._get_func_and_args(tool_call)
-
-        try:
-            return function(**parsed_arguments) if parsed_arguments else function()
-        except TypeError as e:
-            logging.error(f"Error executing function '{tool_call.function.name}': {e}")
-            raise
-
     @property
     def definitions(self) -> List[ToolDefinition]:
         """
@@ -382,7 +372,19 @@ def resources(self) -> ToolResources:
         return ToolResources()
 
 
-class AsyncFunctionTool(FunctionTool):
+class FunctionTool(BaseFunctionTool):
+
+    def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
+        function, parsed_arguments = self._get_func_and_args(tool_call)
+
+        try:
+            return function(**parsed_arguments) if parsed_arguments else function()
+        except TypeError as e:
+            logging.error(f"Error executing function '{tool_call.function.name}': {e}")
+            raise
+
+
+class AsyncFunctionTool(BaseFunctionTool):
 
     async def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
         function, parsed_arguments = self._get_func_and_args(tool_call)
@@ -500,27 +502,17 @@ def resources(self) -> ToolResources:
     def execute(self, tool_call: Any) -> Any:
         pass
 
-
-class ToolSet:
+class BaseToolSet:
     """
-    A collection of tools that can be used by an agent.
+    Abstract class for a collection of tools that can be used by an agent.
     """
 
     def __init__(self):
         self._tools: List[Tool] = []
 
-    def validate_tool_type(self, tool_type: Type[Tool]) -> None:
-        """
-        Validate the type of the tool.
-
-        :param tool_type: The type of the tool to validate.
-        :raises ValueError: If the tool type is not a subclass of Tool.
- """ - if isinstance(tool_type, AsyncFunctionTool): - raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." - ) - + def validate_tool_type(self, tool: Tool) -> None: + pass + def add(self, tool: Tool): """ Add a tool to the tool set. @@ -528,7 +520,7 @@ def add(self, tool: Tool): :param tool: The tool to add. :raises ValueError: If a tool of the same type already exists. """ - self.validate_tool_type(type(tool)) + self.validate_tool_type(tool) if any(isinstance(existing_tool, type(tool)) for existing_tool in self._tools): raise ValueError("Tool of type {type(tool).__name__} already exists in the ToolSet.") @@ -608,6 +600,24 @@ def get_tool(self, tool_type: Type[Tool]) -> Tool: return tool raise ValueError(f"Tool of type {tool_type.__name__} not found.") +class ToolSet(BaseToolSet): + """ + A collection of tools that can be used by an synchronize agent. + """ + + def validate_tool_type(self, tool: Tool) -> None: + """ + Validate the type of the tool. + + :param tool_type: The type of the tool to validate. + :raises ValueError: If the tool type is not a subclass of Tool. + """ + if isinstance(tool, AsyncFunctionTool): + raise ValueError( + "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." + ) + + def execute_tool_calls(self, tool_calls: List[Any]) -> Any: """ Execute a tool of the specified type with the provided tool calls. @@ -633,16 +643,19 @@ def execute_tool_calls(self, tool_calls: List[Any]) -> Any: return tool_outputs -class AsyncToolSet(ToolSet): +class AsyncToolSet(BaseToolSet): + """ + A collection of tools that can be used by an asynchronize agent. + """ - def validate_tool_type(self, tool_type: Type[Tool]) -> None: + def validate_tool_type(self, tool: Tool) -> None: """ Validate the type of the tool. :param tool_type: The type of the tool to validate. :raises ValueError: If the tool type is not a subclass of Tool. """ - if isinstance(tool_type, FunctionTool): + if isinstance(tool, FunctionTool): raise ValueError( "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." ) From 595ec6d0494d4a034575141efaec178afec8d104 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Thu, 31 Oct 2024 16:51:57 -0500 Subject: [PATCH 069/138] adding ai agents instrumentor (#38200) * adding ai agents instrumentor * streaming tracing sample and fixes to streaming tracing * moved tracing to agents tracing folder * adding support for tracing list_messages and async tracing sample * removing some commented out code * adding separate console and azure monitor tracing samples * removing unused code * removing more unused code * renamed tracing to telemetry. 
removed throwing from instrument and uninstrument * Update sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py Co-authored-by: Liudmila Molkova * Update sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py Co-authored-by: Liudmila Molkova * Update sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py Co-authored-by: Liudmila Molkova * fix for streaming with eventhandler and toolset * adding previous fix for async version also * adding missing function --------- Co-authored-by: Marko Hietala Co-authored-by: Liudmila Molkova --- .../azure/ai/projects/operations/_patch.py | 11 + .../azure/ai/projects/telemetry/__init__.py | 1 + .../ai/projects/telemetry/agents/__init__.py | 13 + .../agents/_ai_agents_instrumentor.py | 1369 +++++++++++++++++ .../ai/projects/telemetry/agents/_utils.py | 138 ++ sdk/ai/azure-ai-projects/dev_requirements.txt | 1 + ...basics_async_with_azure_monitor_tracing.py | 93 ++ ...gents_basics_async_with_console_tracing.py | 85 + ...gents_basics_with_azure_monitor_tracing.py | 81 + ...mple_agents_basics_with_console_tracing.py | 73 + ...ts_functions_with_azure_monitor_tracing.py | 150 ++ ...e_agents_functions_with_console_tracing.py | 142 ++ ...eventhandler_with_azure_monitor_tracing.py | 117 ++ ...tream_eventhandler_with_console_tracing.py | 110 ++ 14 files changed, 2384 insertions(+) create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py create mode 100644 sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index b16a17949153..3838353a253d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -421,6 +421,17 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed" ) + try: + from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor + + instrumentor = AIAgentsInstrumentor() + if not instrumentor.is_instrumented(): + instrumentor.instrument() + except Exception as exc: + logger.warning( + "Could not call `AIAgentsInstrumentor().instrument()` " + str(exc) + ) + try: from 
opentelemetry.instrumentation.openai import OpenAIInstrumentor diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py new file mode 100644 index 000000000000..d55ccad1f573 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/__init__.py @@ -0,0 +1 @@ +__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py new file mode 100644 index 000000000000..34fb7e5f7cd8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/__init__.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._ai_agents_instrumentor import AIAgentsInstrumentor + +__all__ = [ + "AIAgentsInstrumentor", +] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py new file mode 100644 index 000000000000..ce345cf0b883 --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -0,0 +1,1369 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+import copy
+from enum import Enum
+import functools
+import json
+import importlib
+import logging
+import os
+from azure.ai.projects import _types
+from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
+from urllib.parse import urlparse
+from azure.ai.projects.telemetry.agents._utils import *  # pylint: disable=unused-wildcard-import
+
+# pylint: disable = no-name-in-module
+from azure.core import CaseInsensitiveEnumMeta  # type: ignore
+from azure.core.settings import settings
+from azure.ai.projects.operations import AgentsOperations
+from azure.ai.projects.aio.operations import AgentsOperations as AsyncAgentOperations
+from azure.ai.projects.models import _models, AgentRunStream
+from azure.ai.projects.models._enums import MessageRole, RunStepStatus
+from azure.ai.projects.models._models import MessageAttachment, MessageDeltaChunk, RunStep, RunStepDeltaChunk, RunStepFunctionToolCall, RunStepToolCallDetails, SubmitToolOutputsAction, ThreadMessage, ThreadMessageOptions, ThreadRun, ToolDefinition, ToolOutput, ToolResources
+from azure.ai.projects.models._patch import AgentEventHandler, ToolSet
+
+_Unset: Any = object()
+
+try:
+    # pylint: disable = no-name-in-module
+    from azure.core.tracing import AbstractSpan, SpanKind  # type: ignore
+    from opentelemetry.trace import StatusCode, Span
+
+    _tracing_library_available = True
+except ModuleNotFoundError:
+
+    _tracing_library_available = False
+
+
+__all__ = [
+    "AIAgentsInstrumentor",
+]
+
+
+_agents_traces_enabled: bool = False
+_trace_agents_content: bool = False
+
+
+class TraceType(str, Enum, metaclass=CaseInsensitiveEnumMeta):  # pylint: disable=C4747
+    """An enumeration class to represent different types of traces."""
+
+    AGENTS = "Agents"
+
+
+class AIAgentsInstrumentor:
+    """
+    A class for managing the trace instrumentation of AI Agents.
+
+    This class allows enabling or disabling tracing for AI Agents
+    and provides functionality to check whether instrumentation is active.
+
+    """
+
+    def __init__(self):
+        if not _tracing_library_available:
+            raise ModuleNotFoundError(
+                "Azure Core Tracing Opentelemetry is not installed. "
+                "Please install it using 'pip install azure-core-tracing-opentelemetry'"
+            )
+        # In the future we could support different versions from the same library
+        # and have a parameter that specifies the version to use.
+        self._impl = _AIAgentsInstrumentorPreview()
+
+    def instrument(self) -> None:
+        """
+        Enable trace instrumentation for AI Agents.
+
+        """
+        self._impl.instrument()
+
+    def uninstrument(self) -> None:
+        """
+        Remove trace instrumentation for AI Agents.
+
+        This method removes any active instrumentation, stopping the tracing
+        of AI Agents.
+        """
+        self._impl.uninstrument()
+
+    def is_instrumented(self) -> bool:
+        """
+        Check if trace instrumentation for AI Agents is currently enabled.
+
+        :return: True if instrumentation is active, False otherwise.
+        :rtype: bool
+        """
+        return self._impl.is_instrumented()
+
+
+class _AIAgentsInstrumentorPreview:
+    """
+    A class for managing the trace instrumentation of AI Agents.
+
+    This class allows enabling or disabling tracing for AI Agents
+    and provides functionality to check whether instrumentation is active.
+    """
+
+    def _str_to_bool(self, s):
+        if s is None:
+            return False
+        return str(s).lower() == "true"
+
+    def instrument(self):
+        """
+        Enable trace instrumentation for AI Agents.
+ + This method checks the environment variable + 'AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED' to determine + whether to enable content tracing. + """ + if not self.is_instrumented(): + var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") + enable_content_tracing = self._str_to_bool(var_value) + self._instrument_agents(enable_content_tracing) + + def uninstrument(self): + """ + Disable trace instrumentation for AI Agents. + + This method removes any active instrumentation, stopping the tracing + of AI Agents. + """ + if self.is_instrumented(): + self._uninstrument_agents() + + def is_instrumented(self): + """ + Check if trace instrumentation for AI Agents is currently enabled. + + :return: True if instrumentation is active, False otherwise. + :rtype: bool + """ + return self._is_instrumented() + + def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: + for attr in attrs: + key, value = attr + if value is not None: + span.add_attribute(key, value) + + def _parse_url(self, url): + parsed = urlparse(url) + server_address = parsed.hostname + port = parsed.port + return server_address, port + + def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: + tool_calls_copy = copy.deepcopy(tool_calls) + for tool_call in tool_calls_copy: + if "function" in tool_call: + if "name" in tool_call["function"]: + del tool_call["function"]["name"] + if "arguments" in tool_call["function"]: + del tool_call["function"]["arguments"] + if not tool_call["function"]: + del tool_call["function"] + return tool_calls_copy + + def _create_event_attributes( + self, + thread_id: str = None, + agent_id: str = None, + thread_run_id: str = None, + message_id: str = None, + message_status: str = None, + usage: Optional[_models.RunStepCompletionUsage] = None + ) -> dict: + attrs = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} + if thread_id: + attrs[GEN_AI_THREAD_ID] = thread_id + + if agent_id: + attrs[GEN_AI_AGENT_ID] = agent_id + + if thread_run_id: + attrs[GEN_AI_THREAD_RUN_ID] = thread_run_id + + if message_id: + attrs[GEN_AI_MESSAGE_ID] = message_id + + if message_status: + attrs[GEN_AI_MESSAGE_STATUS] = message_status + + if usage: + attrs[GEN_AI_USAGE_INPUT_TOKENS] = usage.prompt_tokens + attrs[GEN_AI_USAGE_OUTPUT_TOKENS] = usage.completion_tokens + + return attrs + + def add_thread_message_event(self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None) -> None: + content_body = {} + if _trace_agents_content: + for content in message.content: + typed_content = content.get(content.type, None) + if typed_content: + content_details = {"value": self._get_field(typed_content, "value")} + annotations = self._get_field(typed_content, "annotations") + if annotations: + content_details["annotations"] = annotations + content_body[content.type] = content_details + + self._add_message_event( + span, + self._get_role(message.role), + content_body, + attachments=message.attachments, + thread_id=message.thread_id, + agent_id=message.assistant_id, + message_id=message.id, + thread_run_id=message.run_id, + message_status=message.status, + incomplete_details=message.incomplete_details, + usage=usage) + + def _add_message_event( + self, + span, + role: str, + content: Any, + attachments: Any = None, #Optional[List[MessageAttachment]] or dict + thread_id: Optional[str] = None, + agent_id: Optional[str] = None, + message_id: Optional[str] = None, + thread_run_id: Optional[str] = None, + message_status: Optional[str] = None, + 
incomplete_details: Optional[str] = None, + usage: Optional[_models.RunStepCompletionUsage] = None) -> None: + # TODO document new fields + + event_body = {} + if _trace_agents_content: + event_body["content"] = content + if attachments: + event_body["attachments"] = [] + for attachment in attachments: + attachment_body = {"id": attachment.file_id} + if attachment.tools: + attachment_body["tools"] = [self._get_field(tool, "type") for tool in attachment.tools] + event_body["attachments"].append(attachment_body) + + if incomplete_details: + event_body["incomplete_details"] = incomplete_details + event_body["role"] = role + + attributes = self._create_event_attributes( + thread_id=thread_id, + agent_id=agent_id, + thread_run_id=thread_run_id, + message_id=message_id, + message_status=message_status, + usage=usage) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) + span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) + + def _get_field(self, obj: Any, field: str) -> Any: + if not obj: + return None + + if isinstance(obj, dict): + return obj.get(field, None) + + return getattr(obj, field, None) + + def _add_instructions_event( + self, + span: "AbstractSpan", + instructions: str, + additional_instructions: str, + agent_id: Optional[str] = None, + thread_id: Optional[str] = None) -> None: + if not instructions: + return + + event_body = {} + if _trace_agents_content and (instructions or additional_instructions): + if instructions and additional_instructions: + event_body["content"] = f"{instructions} {additional_instructions}" + else: + event_body["content"] = instructions or additional_instructions + + attributes = self._create_event_attributes(agent_id=agent_id, thread_id=thread_id) + attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) + span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) + + def _get_role(self, role: Union[str, MessageRole]) -> str: + if role is None or role is _Unset: + return "user" + + if isinstance(role, MessageRole): + return role.value + + return role + + def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: + # do we want a new event for it ? 
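+        # Summarize each tool call in this run step: id and type always, plus
+        # the function name and parsed arguments for function tool calls. The
+        # resulting payload is attached below as a gen_ai.assistant.message
+        # span event.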
+ tool_calls = [{"id": t.id, + "type": t.type, + "function" : { + "name": t.function.name, + "arguments": json.loads(t.function.arguments) + } if isinstance(t, RunStepFunctionToolCall) else None, + } for t in step.step_details.tool_calls] + + attributes = self._create_event_attributes(thread_id=step.thread_id, + agent_id=step.assistant_id, + thread_run_id=step.run_id, + message_status=step.status, + usage=step.usage) + + attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) + span.span_instance.add_event(name=f"gen_ai.assistant.message", attributes=attributes) + + def set_end_run(self, span: "AbstractSpan", run: ThreadRun) -> None: + if span and span.span_instance.is_recording: + span.add_attribute(GEN_AI_THREAD_RUN_STATUS, run.status) + span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) + if run.usage: + span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) + span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) + + def start_thread_run_span( + self, + operation_name: OperationName, + project_name: str, + thread_id: str, + agent_id: str, + model: Optional[str] = None, + instructions: Optional[str] = None, + additional_instructions: Optional[str] = None, + additional_messages: Optional[List[ThreadMessage]] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + tools: Optional[List[ToolDefinition]] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + ) -> "AbstractSpan": + span = start_span( + operation_name, + project_name, + thread_id=thread_id, + agent_id=agent_id, + model=model, + temperature=temperature, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format.value if response_format else None) + if span and span.span_instance.is_recording: + self._add_instructions_event(span, instructions, additional_instructions, thread_id=thread_id, agent_id=agent_id) + + if additional_messages: + for message in additional_messages: + self.add_thread_message_event(span, message) + return span + + def start_submit_tool_outputs_span( + self, + project_name: str, + thread_id: str, + run_id: str, + tool_outputs: List[ToolOutput] = _Unset, + event_handler: Optional[AgentEventHandler] = None, + ) -> "AbstractSpan": + + run_span = event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None + recorded = self._add_tool_message_events(run_span, tool_outputs) + + span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, + project_name, + thread_id=thread_id, + run_id=run_id) + if not recorded: + self._add_tool_message_events(span, tool_outputs) + return span + + + def _add_tool_message_events(self, span, tool_outputs: List[ToolOutput]) -> bool: + if span and span.span_instance.is_recording: + for tool_output in tool_outputs: + body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} + span.span_instance.add_event("gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body)}) + return True + + return False + + def start_create_agent_span( + self, + project_name: str, + model: str = _Unset, + name: Optional[str] = None, + description: Optional[str] = None, + instructions: Optional[str] = None, + tools: Optional[List[ToolDefinition]] = None, + tool_resources: Optional[ToolResources] = None, + toolset: Optional[ToolSet] = None, + temperature: Optional[float] = None, + top_p: 
Optional[float] = None, + response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, + ) -> "AbstractSpan": + span = start_span(OperationName.CREATE_AGENT, + project_name, + span_name=f"{OperationName.CREATE_AGENT.value} {name}", + model=model, + temperature=temperature, + top_p=top_p, + response_format=response_format.value if response_format else None) + if span and span.span_instance.is_recording: + if name: + span.add_attribute(GEN_AI_AGENT_NAME, name) + if description: + span.add_attribute(GEN_AI_AGENT_DESCRIPTION, description) + self._add_instructions_event(span, instructions, None) + + return span + + def start_create_thread_span( + self, + project_name: str, + messages: Optional[List[ThreadMessageOptions]] = None, + tool_resources: Optional[ToolResources] = None, + ) -> "AbstractSpan": + span = start_span(OperationName.CREATE_THREAD, project_name) + if span and span.span_instance.is_recording: + for message in messages or []: + self.add_thread_message_event(span, message) + + return span + + def start_list_messages_span( + self, + project_name: str, + thread_id: str + ) -> "AbstractSpan": + return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) + + def trace_create_agent(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + name=kwargs.get("name") + model=kwargs.get("model") + description=kwargs.get("description") + instructions=kwargs.get("instructions") + tools=kwargs.get("tools") + tool_resources=kwargs.get("tool_resources") + toolset=kwargs.get("toolset") + temperature=kwargs.get("temperature") + top_p=kwargs.get("top_p") + response_format=kwargs.get("response_format") + + with self.start_create_agent_span( + project_name=project_name, + name=name, + model=model, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format) as span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_AGENT_ID, result.id) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_agent_async(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + name=kwargs.get("name") + model=kwargs.get("model") + description=kwargs.get("description") + instructions=kwargs.get("instructions") + tools=kwargs.get("tools") + tool_resources=kwargs.get("tool_resources") + toolset=kwargs.get("toolset") + temperature=kwargs.get("temperature") + top_p=kwargs.get("top_p") + response_format=kwargs.get("response_format") + + with self.start_create_agent_span( + project_name=project_name, + name=name, + model=model, + description=description, + instructions=instructions, + tools=tools, + tool_resources=tool_resources, + toolset=toolset, + temperature=temperature, + top_p=top_p, + response_format=response_format) as span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_AGENT_ID, result.id) + except Exception as exc: + # Set the span status to 
error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_thread(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + messages=kwargs.get("messages") + + with self.start_create_thread_span( + project_name=project_name, + messages=messages) as span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_thread_async(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + messages=kwargs.get("messages") + + with self.start_create_thread_span( + project_name=project_name, + messages=messages) as span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_message(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + messages = kwargs.get("messages") + thread_id = kwargs.get("thread_id") + role = kwargs.get("role") + content= kwargs.get("content") + attachments = kwargs.get("attachments") + + with self.start_create_message_span( + project_name=project_name, + thread_id=thread_id, + content=content, + role=role, + attachments=attachments) as span: + try: + result = function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_message_async(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + messages = kwargs.get("messages") + thread_id = 
kwargs.get("thread_id") + role = kwargs.get("role") + content= kwargs.get("content") + attachments = kwargs.get("attachments") + + with self.start_create_message_span( + project_name=project_name, + thread_id=thread_id, + content=content, + role=role, + attachments=attachments) as span: + try: + result = await function(*args, **kwargs) + span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_create_run(self, operation_name, function, *args, **kwargs): + project_name = args[0]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + with self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + additional_messages=additional_messages, + temperature=temperature, + tools=tools, + top_p=top_p, + max_prompt_tokens=max_prompt_tokens, + max_completion_tokens=max_completion_tokens, + response_format=response_format) as span: + try: + result = function(*args, **kwargs) + self.set_end_run(span, result) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_create_run_async(self, operation_name, function, *args, **kwargs): + project_name = args[0]._config.project_name + thread_id = kwargs.get("thread_id") + assistant_id = kwargs.get("assistant_id") + model = kwargs.get("model") + instructions = kwargs.get("instructions") + additional_instructions = kwargs.get("additional_instructions") + additional_messages = kwargs.get("additional_messages") + temperature = kwargs.get("temperature") + tools = kwargs.get("tools") + top_p = kwargs.get("top_p") + max_prompt_tokens = kwargs.get("max_prompt_tokens") + max_completion_tokens = kwargs.get("max_completion_tokens") + response_format = kwargs.get("response_format") + + with self.start_thread_run_span( + operation_name, + project_name, + thread_id, + assistant_id, + model=model, + instructions=instructions, + additional_instructions=additional_instructions, + 
additional_messages=additional_messages,
+            temperature=temperature,
+            tools=tools,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            response_format=response_format) as span:
+            try:
+                result = await function(*args, **kwargs)
+                if span.span_instance.is_recording:
+                    span.add_attribute(GEN_AI_THREAD_RUN_STATUS, result.status)
+                    span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model)
+                    if result.usage:
+                        span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens)
+                        span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens)
+                        span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id"))
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    def trace_submit_tool_outputs(self, stream, function, *args, **kwargs):
+        project_name = args[0]._config.project_name
+        thread_id = kwargs.get("thread_id")
+        run_id = kwargs.get("run_id")
+        tool_outputs = kwargs.get("tool_outputs")
+        event_handler = kwargs.get("event_handler")
+
+        with self.start_submit_tool_outputs_span(
+            project_name=project_name,
+            thread_id=thread_id,
+            run_id=run_id,
+            tool_outputs=tool_outputs,
+            event_handler=event_handler) as span:
+            try:
+                if stream:
+                    kwargs['event_handler'] = self.wrap_handler(event_handler, span)
+
+                result = function(*args, **kwargs)
+                if not isinstance(result, AgentRunStream):
+                    self.set_end_run(span, result)
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs):
+        project_name = args[0]._config.project_name
+        thread_id = kwargs.get("thread_id")
+        run_id = kwargs.get("run_id")
+        tool_outputs = kwargs.get("tool_outputs")
+        event_handler = kwargs.get("event_handler")
+
+        with self.start_submit_tool_outputs_span(
+            project_name=project_name,
+            thread_id=thread_id,
+            run_id=run_id,
+            tool_outputs=tool_outputs,
+            event_handler=event_handler) as span:
+            try:
+                if stream:
+                    kwargs['event_handler'] = self.wrap_handler(event_handler, span)
+
+                result = await function(*args, **kwargs)
+                if not isinstance(result, AgentRunStream):
+                    self.set_end_run(span, result)
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else
type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    def trace_handle_submit_tool_outputs(self, function, *args, **kwargs):
+        event_handler = kwargs.get("event_handler")
+        if event_handler is None:
+            event_handler = args[2]
+        span = getattr(event_handler, "span", None)
+        with span.change_context(span.span_instance):
+            try:
+                result = function(*args, **kwargs)
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs):
+        event_handler = kwargs.get("event_handler")
+        if event_handler is None:
+            event_handler = args[2]
+        span = getattr(event_handler, "span", None)
+        with span.change_context(span.span_instance):
+            try:
+                result = await function(*args, **kwargs)
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    def trace_create_stream(self, function, *args, **kwargs):
+        operation_name = OperationName.PROCESS_THREAD_RUN
+        project_name = args[0]._config.project_name
+        thread_id = kwargs.get("thread_id")
+        assistant_id = kwargs.get("assistant_id")
+        model = kwargs.get("model")
+        instructions = kwargs.get("instructions")
+        additional_instructions = kwargs.get("additional_instructions")
+        additional_messages = kwargs.get("additional_messages")
+        temperature = kwargs.get("temperature")
+        tools = kwargs.get("tools")
+        top_p = kwargs.get("top_p")
+        max_prompt_tokens = kwargs.get("max_prompt_tokens")
+        max_completion_tokens = kwargs.get("max_completion_tokens")
+        response_format = kwargs.get("response_format")
+        event_handler = kwargs.get("event_handler")
+
+        span = self.start_thread_run_span(
+            operation_name,
+            project_name,
+            thread_id,
+            assistant_id,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            temperature=temperature,
+            tools=tools,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            response_format=response_format)
+
+        # TODO: how to keep span active in the current context without exiting?
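+        # Note: the run span is deliberately left open when this method returns.
+        # For streaming runs the interesting events arrive while the caller
+        # consumes the stream, so the span is handed to the wrapped event
+        # handler below and closed in _AgentEventHandlerTraceWrapper.__exit__.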
+        # TODO: dummy span for none
+        with span.change_context(span.span_instance):
+            try:
+                kwargs['event_handler'] = self.wrap_handler(event_handler, span)
+                result = function(*args, **kwargs)
+            except Exception as exc:
+                # Set the span status to error
+                if isinstance(span.span_instance, Span):  # pyright: ignore [reportPossiblyUnboundVariable]
+                    span.span_instance.set_status(
+                        StatusCode.ERROR,  # pyright: ignore [reportPossiblyUnboundVariable]
+                        description=str(exc),
+                    )
+                module = getattr(exc, "__module__", "")
+                module = module if module != "builtins" else ""
+                error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__
+                self._set_attributes(span, ("error.type", error_type))
+                raise
+
+        return result
+
+    async def trace_create_stream_async(self, function, *args, **kwargs):
+        operation_name = OperationName.PROCESS_THREAD_RUN
+        project_name = args[0]._config.project_name
+        thread_id = kwargs.get("thread_id")
+        assistant_id = kwargs.get("assistant_id")
+        model = kwargs.get("model")
+        instructions = kwargs.get("instructions")
+        additional_instructions = kwargs.get("additional_instructions")
+        additional_messages = kwargs.get("additional_messages")
+        temperature = kwargs.get("temperature")
+        tools = kwargs.get("tools")
+        top_p = kwargs.get("top_p")
+        max_prompt_tokens = kwargs.get("max_prompt_tokens")
+        max_completion_tokens = kwargs.get("max_completion_tokens")
+        response_format = kwargs.get("response_format")
+        event_handler = kwargs.get("event_handler")
+
+        span = self.start_thread_run_span(
+            operation_name,
+            project_name,
+            thread_id,
+            assistant_id,
+            model=model,
+            instructions=instructions,
+            additional_instructions=additional_instructions,
+            additional_messages=additional_messages,
+            temperature=temperature,
+            tools=tools,
+            top_p=top_p,
+            max_prompt_tokens=max_prompt_tokens,
+            max_completion_tokens=max_completion_tokens,
+            response_format=response_format)
+
+        # TODO: how to keep span active in the current context without exiting?
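+        # Same pattern as the sync version above: the span stays open and is
+        # closed by the wrapped event handler when the stream context exits.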
+ # TODO: dummy span for none + with span.change_context(span.span_instance): + try: + kwargs['event_handler'] = self.wrap_handler(event_handler, span) + result = await function(*args, **kwargs) + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def trace_list_messages(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + thread_id = kwargs.get("thread_id") + + with self.start_list_messages_span( + project_name=project_name, + thread_id=thread_id) as span: + try: + result = function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + async def trace_list_messages_async(self, function, *args, **kwargs): + project_name = args[0]._config.project_name + thread_id = kwargs.get("thread_id") + + with self.start_list_messages_span( + project_name=project_name, + thread_id=thread_id) as span: + try: + result = await function(*args, **kwargs) + for message in result.data: + self.add_thread_message_event(span, message) + + except Exception as exc: + # Set the span status to error + if isinstance(span.span_instance, Span): # pyright: ignore [reportPossiblyUnboundVariable] + span.span_instance.set_status( + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + description=str(exc), + ) + module = getattr(exc, "__module__", "") + module = module if module != "builtins" else "" + error_type = f"{module}.{type(exc).__name__}" if module else type(exc).__name__ + self._set_attributes(span, ("error.type", error_type)) + raise + + return result + + def handle_run_stream_exit(self, function, *args, **kwargs): + agent_run_stream = args[0] + exc_type = kwargs.get("exc_type") + exc_val = kwargs.get("exc_val") + exc_tb = kwargs.get("exc_tb") + # TODO: is it a good idea? 
+        # if not, we'll need to wrap stream and call exit
+        if agent_run_stream.event_handler and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper":
+            agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb)
+
+    def wrap_handler(self, handler: "_models.AgentEventHandler", span: "AbstractSpan") -> "_models.AgentEventHandler":
+        if isinstance(handler, _AgentEventHandlerTraceWrapper):
+            return handler
+
+        if span and span.span_instance.is_recording:
+            return _AgentEventHandlerTraceWrapper(handler, self, span)
+
+        return handler
+
+    def start_create_message_span(
+        self,
+        project_name: str,
+        thread_id: str,
+        content: str,
+        role: Union[str, MessageRole] = _Unset,
+        attachments: Optional[List[MessageAttachment]] = None
+    ) -> "AbstractSpan":
+        role_str = self._get_role(role)
+        span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id)
+        if span and span.span_instance.is_recording:
+            self._add_message_event(span, role_str, content, attachments=attachments, thread_id=thread_id)
+        return span
+
+    def _trace_sync_function(
+        self,
+        function: Callable,
+        *,
+        _args_to_ignore: Optional[List[str]] = None,
+        _trace_type=TraceType.AGENTS,
+        _name: Optional[str] = None,
+    ) -> Callable:
+        """
+        Decorator that adds tracing to a synchronous function.
+
+        :param function: The function to be traced.
+        :type function: Callable
+        :param args_to_ignore: A list of argument names to be ignored in the trace.
+            Defaults to None.
+        :type args_to_ignore: List[str], optional
+        :param trace_type: The type of the trace. Defaults to TraceType.AGENTS.
+        :type trace_type: TraceType, optional
+        :param name: The name of the trace; defaults to the function name if not provided.
+        :type name: str, optional
+        :return: The traced function.
+        :rtype: Callable
+        """
+
+        @functools.wraps(function)
+        def inner(*args, **kwargs):
+
+            span_impl_type = settings.tracing_implementation()
+            if span_impl_type is None:
+                return function(*args, **kwargs)
+
+            class_function_name = function.__qualname__
+
+            if class_function_name.startswith("AgentsOperations.create_agent"):
+                return self.trace_create_agent(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_thread"):
+                return self.trace_create_thread(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_message"):
+                return self.trace_create_message(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_run"):
+                return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_and_process_run"):
+                return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"):
+                return self.trace_submit_tool_outputs(False, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"):
+                return self.trace_submit_tool_outputs(True, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"):
+                return self.trace_handle_submit_tool_outputs(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_stream"):
+                return self.trace_create_stream(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.list_messages"):
+                return self.trace_list_messages(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentRunStream.__exit__"):
+                return self.handle_run_stream_exit(function, *args, **kwargs)
+            # Handle the default case (if the function name does not match)
+            return None  # Ensure all paths return
+
+        return inner
+
+    def _trace_async_function(
+        self,
+        function: Callable,
+        *,
+        _args_to_ignore: Optional[List[str]] = None,
+        _trace_type=TraceType.AGENTS,
+        _name: Optional[str] = None,
+    ) -> Callable:
+        """
+        Decorator that adds tracing to an asynchronous function.
+
+        :param function: The function to be traced.
+        :type function: Callable
+        :param args_to_ignore: A list of argument names to be ignored in the trace.
+            Defaults to None.
+        :type args_to_ignore: List[str], optional
+        :param trace_type: The type of the trace. Defaults to TraceType.AGENTS.
+        :type trace_type: TraceType, optional
+        :param name: The name of the trace; defaults to the function name if not provided.
+        :type name: str, optional
+        :return: The traced function.
+        :rtype: Callable
+        """
+
+        @functools.wraps(function)
+        async def inner(*args, **kwargs):
+
+            span_impl_type = settings.tracing_implementation()
+            if span_impl_type is None:
+                return await function(*args, **kwargs)
+
+            class_function_name = function.__qualname__
+
+            if class_function_name.startswith("AgentsOperations.create_agent"):
+                return await self.trace_create_agent_async(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_thread"):
+                return await self.trace_create_thread_async(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_message"):
+                return await self.trace_create_message_async(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_run"):
+                return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_and_process_run"):
+                return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"):
+                return await self.trace_submit_tool_outputs_async(False, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"):
+                return await self.trace_submit_tool_outputs_async(True, function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"):
+                return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.create_stream"):
+                return await self.trace_create_stream_async(function, *args, **kwargs)
+            elif class_function_name.startswith("AgentsOperations.list_messages"):
+                return await self.trace_list_messages_async(function, *args, **kwargs)
+            # Handle the default case (if the function name does not match)
+            return None  # Ensure all paths return
+
+        return inner
+
+    def _inject_async(self, f, _trace_type, _name):
+        wrapper_fun = self._trace_async_function(f)
+        wrapper_fun._original = f  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
+        return wrapper_fun
+
+    def _inject_sync(self, f, _trace_type, _name):
+        wrapper_fun = self._trace_sync_function(f)
+        wrapper_fun._original = f  # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess]
+        return wrapper_fun
+
+    def _agents_apis(self):
+        sync_apis = (
+            ("azure.ai.projects.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"),
+            ("azure.ai.projects.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "thread_create"),
+            ("azure.ai.projects.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "message_create"),
+            ("azure.ai.projects.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"),
+            ("azure.ai.projects.operations", "AgentsOperations", "create_and_process_run", TraceType.AGENTS, "create_and_process_run"),
+            ("azure.ai.projects.operations", "AgentsOperations", "submit_tool_outputs_to_run", TraceType.AGENTS, "submit_tool_outputs_to_run"),
+            ("azure.ai.projects.operations", "AgentsOperations", "submit_tool_outputs_to_stream", TraceType.AGENTS, "submit_tool_outputs_to_stream"),
+            ("azure.ai.projects.operations", "AgentsOperations", "_handle_submit_tool_outputs", TraceType.AGENTS, "_handle_submit_tool_outputs"),
+            ("azure.ai.projects.operations", "AgentsOperations", "create_stream", TraceType.AGENTS,
"create_stream"), + ("azure.ai.projects.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), + ("azure.ai.projects.models", "AgentRunStream", "__exit__", TraceType.AGENTS, "__exit__"), + ) + async_apis = ( + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "agents_thread_create"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "agents_thread_message"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_and_process_run", TraceType.AGENTS, "create_and_process_run"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "submit_tool_outputs_to_run", TraceType.AGENTS, "submit_tool_outputs_to_run"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "submit_tool_outputs_to_stream", TraceType.AGENTS, "submit_tool_outputs_to_stream"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "_handle_submit_tool_outputs", TraceType.AGENTS, "_handle_submit_tool_outputs"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "create_stream", TraceType.AGENTS, "create_stream"), + ("azure.ai.projects.aio.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), + ) + return sync_apis, async_apis + + def _agents_api_list(self): + sync_apis, async_apis = self._agents_apis() + yield sync_apis, self._inject_sync + yield async_apis, self._inject_async + + def _generate_api_and_injector(self, apis): + for api, injector in apis: + for module_name, class_name, method_name, trace_type, name in api: + try: + module = importlib.import_module(module_name) + api = getattr(module, class_name) + if hasattr(api, method_name): + yield api, method_name, trace_type, injector, name + except AttributeError as e: + # Log the attribute exception with the missing class information + logging.warning( + "AttributeError: The module '%s' does not have the class '%s'. %s", + module_name, + class_name, + str(e), + ) + except Exception as e: # pylint: disable=broad-except + # Log other exceptions as a warning, as we're not sure what they might be + logging.warning("An unexpected error occurred: '%s'", str(e)) + + def _available_agents_apis_and_injectors(self): + """ + Generates a sequence of tuples containing Agents API classes, method names, and + corresponding injector functions. + + :return: A generator yielding tuples. + :rtype: tuple + """ + yield from self._generate_api_and_injector(self._agents_api_list()) + + def _instrument_agents(self, enable_content_tracing: bool = False): + """This function modifies the methods of the Agents API classes to + inject logic before calling the original methods. + The original methods are stored as _original attributes of the methods. + + :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. 
+ :type enable_content_tracing: bool + """ + # pylint: disable=W0603 + global _agents_traces_enabled + global _trace_agents_content + if _agents_traces_enabled: + raise RuntimeError("Traces already started for AI Agents") + _agents_traces_enabled = True + _trace_agents_content = enable_content_tracing + for ( + api, + method, + trace_type, + injector, + name, + ) in self._available_agents_apis_and_injectors(): + # Check if the method of the api class has already been modified + if not hasattr(getattr(api, method), "_original"): + setattr(api, method, injector(getattr(api, method), trace_type, name)) + + def _uninstrument_agents(self): + """This function restores the original methods of the Agents API classes + by assigning them back from the _original attributes of the modified methods. + """ + # pylint: disable=W0603 + global _agents_traces_enabled + global _trace_agents_content + _trace_agents_content = False + for api, method, _, _, _ in self._available_agents_apis_and_injectors(): + if hasattr(getattr(api, method), "_original"): + setattr(api, method, getattr(getattr(api, method), "_original")) + _agents_traces_enabled = False + + def _is_instrumented(self): + """This function returns True if Agents API has already been instrumented + for tracing and False if it has not been instrumented. + + :return: A value indicating whether the Agents API is currently instrumented or not. + :rtype: bool + """ + return _agents_traces_enabled + + +class _AgentEventHandlerTraceWrapper(AgentEventHandler): + def __init__(self, inner_handler: AgentEventHandler, instrumentor: AIAgentsInstrumentor, span: "AbstractSpan"): + super().__init__() + self.span = span + self.inner_handler = inner_handler + self.ended = False + self.last_run = None + self.last_message = None + self.instrumentor = instrumentor + + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + if self.inner_handler: + self.inner_handler.on_message_delta(delta) + + def on_thread_message(self, message: "ThreadMessage") -> None: + if self.inner_handler: + self.inner_handler.on_thread_message(message) + + if message.status == "completed" or message.status == "incomplete": + self.last_message = message + + def on_thread_run(self, run: "ThreadRun") -> None: + if self.inner_handler: + self.inner_handler.on_thread_run(run) + self.last_run = run + + def on_run_step(self, step: "RunStep") -> None: + if self.inner_handler: + self.inner_handler.on_run_step(step) + + if step.status == RunStepStatus.IN_PROGRESS: + return + + # todo - report errors for failure statuses here and in run ? 
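+        # A completed tool_calls step is recorded as an assistant-message event
+        # carrying the tool call details; a completed message_creation step
+        # flushes the last buffered thread message (captured in
+        # on_thread_message) together with the step's token usage.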
+ if step.type == "tool_calls" and isinstance(step.step_details, RunStepToolCallDetails): + self.instrumentor._add_tool_assistant_message_event(self.span, step) + elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: + self.instrumentor.add_thread_message_event(self.span, self.last_message, step.usage) + self.last_message = None + + def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: + if self.inner_handler: + self.inner_handler.on_run_step_delta(delta) + + def on_error(self, data: str) -> None: + if self.inner_handler: + self.inner_handler.on_error(data) + + def on_done(self) -> None: + if self.inner_handler: + self.inner_handler.on_done() + # it could be called multiple tines (for each step) __exit__ + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + if self.inner_handler: + self.inner_handler.on_unhandled_event(event_type, event_data) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.ended: + self.ended = True + self.instrumentor.set_end_run(self.span, self.last_run) + + if self.last_run.last_error: + self.span.set_status(StatusCode.ERROR, self.last_run.last_error.message) + self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) + + self.span.__exit__(exc_type, exc_val, exc_tb) + self.span.finish() \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py new file mode 100644 index 000000000000..524ebad14e2f --- /dev/null +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py @@ -0,0 +1,138 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +from enum import Enum +from typing import Optional + +from azure.core.tracing import SpanKind # type: ignore +from azure.core.settings import settings # type: ignore + +try: + # pylint: disable = no-name-in-module + from azure.core.tracing import AbstractSpan, SpanKind # type: ignore + from opentelemetry.trace import StatusCode, Span + + _span_impl_type = settings.tracing_implementation() +except ModuleNotFoundError: + _span_impl_type = None + + +GEN_AI_MESSAGE_ID = "gen_ai.message.id" +GEN_AI_MESSAGE_STATUS = "gen_ai.message.status" +GEN_AI_THREAD_ID = "gen_ai.thread.id" +GEN_AI_THREAD_RUN_ID = "gen_ai.thread.run.id" +GEN_AI_AGENT_ID = "gen_ai.agent.id" +GEN_AI_AGENT_NAME = "gen_ai.agent.name" +GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description" +GEN_AI_OPERATION_NAME = "gen_ai.operation.name" +GEN_AI_THREAD_RUN_STATUS = "gen_ai.thread.run.status" +GEN_AI_REQUEST_MODEL = "gen_ai.request.model" +GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" +GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" +GEN_AI_REQUEST_MAX_INPUT_TOKENS = "gen_ai.request.max_input_tokens" +GEN_AI_REQUEST_MAX_OUTPUT_TOKENS = "gen_ai.request.max_output_tokens" +GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" +GEN_AI_SYSTEM = "gen_ai.system" +SERVER_ADDRESS = "server.address" +AZ_AI_AGENT_SYSTEM = "az.ai.agents" +GEN_AI_TOOL_NAME = "gen_ai.tool.name" +GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id" +GEN_AI_REQUEST_RESPONSE_FORMAT = "gen_ai.request.response_format" +GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" +GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" +GEN_AI_SYSTEM_MESSAGE = "gen_ai.system.message" +GEN_AI_EVENT_CONTENT = "gen_ai.event.content" +ERROR_TYPE = "error.type" + + +class OperationName(Enum): + 
CREATE_AGENT = "create_agent" + CREATE_THREAD = "create_thread" + CREATE_MESSAGE = "create_message" + START_THREAD_RUN = "start_thread_run" + EXECUTE_TOOL = "execute_tool" + LIST_MESSAGES = "list_messages" + SUBMIT_TOOL_OUTPUTS = "submit_tool_outputs" + PROCESS_THREAD_RUN = "process_thread_run" + + +def trace_tool_execution( + tool_call_id: str, + tool_name: str, + thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + agent_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + run_id: Optional[str] = None # TODO: would be nice to have this, but need to propagate somehow +) -> "AbstractSpan": + span = start_span(OperationName.EXECUTE_TOOL, + server_address=None, + span_name=f"execute_tool {tool_name}", + thread_id=thread_id, + agent_id=agent_id, + run_id=run_id, + gen_ai_system=None) # it's a client code execution, not GenAI span + if span is not None and span.span_instance.is_recording: + span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) + span.add_attribute(GEN_AI_TOOL_NAME, tool_name) + + return span + +def start_span( + operation_name: OperationName, + server_address: str, + span_name: str = None, + thread_id: str = None, + agent_id: str = None, + run_id: str = None, + model: str = None, + temperature: str = None, + top_p: str = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional[str] = None, + gen_ai_system: str = AZ_AI_AGENT_SYSTEM, + kind: SpanKind = SpanKind.CLIENT +) -> "AbstractSpan": + if _span_impl_type is None: + return None + + span = _span_impl_type(name=span_name or operation_name.value, kind=kind) + + if span and span.span_instance.is_recording: + if gen_ai_system: + span.add_attribute(GEN_AI_SYSTEM, AZ_AI_AGENT_SYSTEM) + + span.add_attribute(GEN_AI_OPERATION_NAME, operation_name.value) + + if server_address: + span.add_attribute(SERVER_ADDRESS, server_address) + + if thread_id: + span.add_attribute(GEN_AI_THREAD_ID, thread_id) + + if agent_id: + span.add_attribute(GEN_AI_AGENT_ID, agent_id) + + if run_id: + span.add_attribute(GEN_AI_THREAD_RUN_ID, run_id) + + if model: + span.add_attribute(GEN_AI_REQUEST_MODEL, model) + + if temperature: + span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, temperature) + + if top_p: + span.add_attribute(GEN_AI_REQUEST_TOP_P, top_p) + + if max_prompt_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) + + if max_completion_tokens: + span.add_attribute(GEN_AI_REQUEST_MAX_OUTPUT_TOKENS, max_completion_tokens) + + if response_format: + span.add_attribute(GEN_AI_REQUEST_RESPONSE_FORMAT, response_format) + + return span diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index c82827bb56f4..445b9b5aaf20 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -1,4 +1,5 @@ -e ../../../tools/azure-sdk-tools ../../core/azure-core ../../identity/azure-identity +../../core/azure-core-tracing-opentelemetry aiohttp \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..8520028205e4 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py 
@@ -0,0 +1,93 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async_with_azure_monitor_tracing.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using an asynchronous client with Azure Monitor tracing.
+    View the results in the "Tracing" tab in your Azure AI Studio project page.
+
+USAGE:
+    python sample_agents_basics_async_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+    pip install azure-monitor-opentelemetry
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+import asyncio
+import sys
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.tracing.agents import AIAgentsInstrumentor
+from opentelemetry import trace
+import os
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+
+tracer = trace.get_tracer(__name__)
+
+@tracer.start_as_current_span(__file__)
+async def main():
+
+    # Create an Azure AI Project Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    # Enable Azure Monitor tracing
+    application_insights_connection_string = project_client.telemetry.get_connection_string()
+    if not application_insights_connection_string:
+        print("Application Insights was not enabled for this project.")
+        print("Enable it via the 'Tracing' tab in your AI Studio project page.")
+        exit()
+    configure_azure_monitor(connection_string=application_insights_connection_string)
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+        # poll the run as long as run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second without blocking the event loop
+            await asyncio.sleep(1)
+            run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            print(f"Run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git
a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py
new file mode 100644
index 000000000000..945d5e7018c2
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py
@@ -0,0 +1,85 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_async_with_console_tracing.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using an asynchronous client with tracing to console.
+
+USAGE:
+    python sample_agents_basics_async_with_console_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+import asyncio
+import sys
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.tracing.agents import AIAgentsInstrumentor
+from opentelemetry import trace
+import os
+
+
+tracer = trace.get_tracer(__name__)
+
+@tracer.start_as_current_span(__file__)
+async def main():
+
+    # Create an Azure AI Project Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    # Enable console tracing
+    project_client.telemetry.enable(destination=sys.stdout)
+
+    async with project_client:
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a joke"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+        # poll the run as long as run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second without blocking the event loop
+            await asyncio.sleep(1)
+            run = await project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            print(f"Run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py
new file mode 100644
index 000000000000..c2c1033a2ed4
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py
@@ -0,0 +1,81 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_basics_with_azure_monitor_tracing.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations from
+    the Azure Agents service using a synchronous client with Azure Monitor tracing.
+    View the results in the "Tracing" tab in your Azure AI Studio project page.
+
+USAGE:
+    python sample_agents_basics_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+    pip install azure-monitor-opentelemetry
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+
+import os, sys, time
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+# Create an AI Project Client from a connection string, copied from your AI Studio project.
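+# (For example, a hypothetical connection string could look like
+#  "eastus2.api.azureml.ms;00000000-0000-0000-0000-000000000000;my-resource-group;my-project".)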
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Enable Azure Monitor tracing +application_insights_connection_string = project_client.telemetry.get_connection_string() +if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Studio project page.") + exit() +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + + # poll the run as long as run status is queued or in progress + while run.status in ["queued", "in_progress", "requires_action"]: + # wait for a second + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + print(f"Run status: {run.status}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py new file mode 100644 index 000000000000..6138cf1765d5 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py @@ -0,0 +1,73 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_basics_with_console_tracing.py + +DESCRIPTION: + This sample demonstrates how to use basic agent operations from + the Azure Agents service using a synchronous client with tracing to console. + +USAGE: + python sample_agents_basics_with_console_tracing.py + + Before running the sample: + + pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. +""" + +import os, sys, time +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from opentelemetry import trace + +# Create an AI Project Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+# Enable console tracing
+project_client.telemetry.enable(destination=sys.stdout)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+with tracer.start_as_current_span(scenario):
+    with project_client:
+        agent = project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID: {message.id}")
+
+        run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+        # poll the run as long as run status is queued or in progress
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            print(f"Run status: {run.status}")
+
+        project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py
new file mode 100644
index 000000000000..48eeb0b8749e
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py
@@ -0,0 +1,150 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions_with_azure_monitor_tracing.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations with function tools from
+    the Azure Agents service using a synchronous client with Azure Monitor tracing.
+    View the results in the "Tracing" tab in your Azure AI Studio project page.
+
+USAGE:
+    python sample_agents_functions_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+    pip install azure-monitor-opentelemetry
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+
+import os, sys, time, json
+from typing import Any, Callable, Set
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+
+# Create an AI Project Client from a connection string, copied from your AI Studio project.
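+# (A hypothetical example of such a connection string:
+#  "eastus2.api.azureml.ms;00000000-0000-0000-0000-000000000000;my-resource-group;my-project".)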
+# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +# Enable Azure Monitor tracing +application_insights_connection_string = project_client.telemetry.get_connection_string() +if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Studio project page.") + exit() +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +# The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes +# to the span in the function implementation. Note that this will trace the function parameters and their values. +@tracer.start_as_current_span("fetch_weather") # type: ignore +def fetch_weather(location: str) -> str: + """ + Fetches the weather information for the specified location. + + :param location (str): The location to fetch weather for. + :return: Weather information as a JSON string. + :rtype: str + """ + # In a real-world scenario, you'd integrate with a weather API. + # Here, we'll mock the response. + mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + + # Adding attributes to the current span + span = trace.get_current_span() + span.set_attribute("requested_location", location) + + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_weather, +} + +# Initialize function tool with user function +functions = FunctionTool(functions=user_functions) + +with tracer.start_as_current_span(scenario): + with project_client: + # Create an agent and run user's request with function calls + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, what is the weather in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append({ + "tool_call_id": tool_call.id, + "output": output, + }) + except Exception as e: + 
print(f"Error executing tool_call {tool_call.id}: {e}")
+
+            print(f"Tool outputs: {tool_outputs}")
+            if tool_outputs:
+                project_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs)
+
+            print(f"Current run status: {run.status}")
+
+        print(f"Run completed with status: {run.status}")
+
+        # Delete the agent when done
+        project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        # Fetch and log all messages
+        messages = project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py
new file mode 100644
index 000000000000..2b3169e856d1
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py
@@ -0,0 +1,142 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_functions_with_console_tracing.py
+
+DESCRIPTION:
+    This sample demonstrates how to use basic agent operations with function tools from
+    the Azure Agents service using a synchronous client with tracing to console.
+
+USAGE:
+    python sample_agents_functions_with_console_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+"""
+
+import os, sys, time, json
+from typing import Any, Callable, Set
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall
+from opentelemetry import trace
+
+
+# Create an AI Project Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+# Enable console tracing
+project_client.telemetry.enable(destination=sys.stdout)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+# The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes
+# to the span in the function implementation. Note that this will trace the function parameters and their values.
+@tracer.start_as_current_span("fetch_weather")  # type: ignore
+def fetch_weather(location: str) -> str:
+    """
+    Fetches the weather information for the specified location.
+
+    :param location (str): The location to fetch weather for.
+    :return: Weather information as a JSON string.
+    :rtype: str
+    """
+    # In a real-world scenario, you'd integrate with a weather API.
+    # Here, we'll mock the response.
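+    # (A hypothetical real implementation might look like:
+    #      resp = requests.get(WEATHER_API_URL, params={"q": location})
+    #      return json.dumps({"weather": resp.json()["summary"]})
+    #  where WEATHER_API_URL and the response shape are assumptions;
+    #  the static dictionary below stands in for that call.)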
+ mock_weather_data = {"New York": "Sunny, 25°C", "London": "Cloudy, 18°C", "Tokyo": "Rainy, 22°C"} + + # Adding attributes to the current span + span = trace.get_current_span() + span.set_attribute("requested_location", location) + + weather = mock_weather_data.get(location, "Weather data not available for this location.") + weather_json = json.dumps({"weather": weather}) + return weather_json + +# Statically defined user functions for fast reference +user_functions: Set[Callable[..., Any]] = { + fetch_weather, +} + +# Initialize function tool with user function +functions = FunctionTool(functions=user_functions) + +with tracer.start_as_current_span(scenario): + with project_client: + # Create an agent and run user's request with function calls + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=functions.definitions, + ) + print(f"Created agent, ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, what is the weather in New York?", + ) + print(f"Created message, ID: {message.id}") + + run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, ID: {run.id}") + + while run.status in ["queued", "in_progress", "requires_action"]: + time.sleep(1) + run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) + + if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction): + tool_calls = run.required_action.submit_tool_outputs.tool_calls + if not tool_calls: + print("No tool calls provided - cancelling run") + project_client.agents.cancel_run(thread_id=thread.id, run_id=run.id) + break + + tool_outputs = [] + for tool_call in tool_calls: + if isinstance(tool_call, RequiredFunctionToolCall): + try: + output = functions.execute(tool_call) + tool_outputs.append({ + "tool_call_id": tool_call.id, + "output": output, + }) + except Exception as e: + print(f"Error executing tool_call {tool_call.id}: {e}") + + print(f"Tool outputs: {tool_outputs}") + if tool_outputs: + project_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) + + print(f"Current run status: {run.status}") + + print(f"Run completed with status: {run.status}") + + # Delete the agent when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py new file mode 100644 index 000000000000..63ffcf7b793a --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -0,0 +1,117 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_azure_monitor_tracing.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a synchronous client with Azure Monitor tracing. 
+    View the results in the "Tracing" tab in your Azure AI Studio project page.
+
+USAGE:
+    python sample_agents_stream_eventhandler_with_azure_monitor_tracing.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http
+    pip install azure-monitor-opentelemetry
+
+    Set these environment variables with your own values:
+    * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project.
+    * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat
+      messages, which may contain personal data. False by default.
+
+"""
+
+import os, sys
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import (
+    AgentEventHandler,
+    MessageDeltaTextContent,
+    MessageDeltaChunk,
+    ThreadMessage,
+    ThreadRun,
+    RunStep,
+)
+from typing import Any
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor

+# Create an Azure AI Project Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+
+class MyEventHandler(AgentEventHandler):
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"Message status: {message.status}, Content: {message.content[0].as_dict()}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred.
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + +# Enable Azure Monitor tracing +application_insights_connection_string = project_client.telemetry.get_connection_string() +if not application_insights_connection_string: + print("Application Insights was not enabled for this project.") + print("Enable it via the 'Tracing' tab in your AI Studio project page.") + exit() +configure_azure_monitor(connection_string=application_insights_connection_string) + +scenario = os.path.basename(__file__) +tracer = trace.get_tracer(__name__) + +with tracer.start_as_current_span(scenario): + with project_client: + # Create an agent and run stream with event handler + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant" + ) + print(f"Created agent, agent ID {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID {thread.id}") + + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + print(f"Created message, message ID {message.id}") + + with project_client.agents.create_stream( + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() + ) as stream: + stream.until_done() + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py new file mode 100644 index 000000000000..d98b91021e46 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py @@ -0,0 +1,110 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_stream_eventhandler_with_console_tracing.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with an event handler in streaming from + the Azure Agents service using a synchronous client with tracing to console. + +USAGE: + python sample_agents_stream_eventhandler_with_console_tracing.py + + Before running the sample: + + pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + + Set these environment variables with your own values: + * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. + * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + messages, which may contain personal data. False by default. + +""" + +import os,sys +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models._enums import RunStepType +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import ( + AgentEventHandler, + MessageDeltaTextContent, + MessageDeltaChunk, + ThreadMessage, + ThreadRun, + RunStep, +) +from typing import Any +from opentelemetry import trace + + +# Create an Azure AI Project Client from a connection string, copied from your AI Studio project. 
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customer needs to login to Azure subscription via Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+
+class MyEventHandler(AgentEventHandler):
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"Message status: {message.status}, Content: {message.content[0].as_dict()}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred. Data: {data}")
+
+    def on_done(self) -> None:
+        print("Stream completed.")
+
+    def on_unhandled_event(self, event_type: str, event_data: Any) -> None:
+        print(f"Unhandled Event Type: {event_type}, Data: {event_data}")
+
+# Enable console tracing
+project_client.telemetry.enable(destination=sys.stdout)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+with tracer.start_as_current_span(scenario):
+    with project_client:
+        # Create an agent and run stream with event handler
+        agent = project_client.agents.create_agent(
+            model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant"
+        )
+        print(f"Created agent, agent ID {agent.id}")
+
+        thread = project_client.agents.create_thread()
+        print(f"Created thread, thread ID {thread.id}")
+
+        message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        print(f"Created message, message ID {message.id}")
+
+        with project_client.agents.create_stream(
+            thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+        ) as stream:
+            stream.until_done()
+
+        project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")

From 6c3da41b6511ab221c9105ec99c73265cb55f6e3 Mon Sep 17 00:00:00 2001
From: Jarno Hakulinen
Date: Thu, 31 Oct 2024 14:45:52 -0800
Subject: [PATCH 070/138] Jhakulin/user functions updates (#38248)

* update to get file content
* agents async updates
* update
* fix await/pylance issue
* fix pylance error on AsyncToolSet
* return empty set if no tool resources for code interpreter
* added async toolset sample
* updates to user functions
* update
* fix the array type for parameters
---
 .../azure/ai/projects/models/_patch.py        |  87 ++++++++---
 .../samples/agents/sample_agents_functions.py |   1 +
 .../samples/agents/user_functions.py          | 135 +++++++++++++++++-
 3 files changed, 202 insertions(+), 21 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
index e7cd6b5ad9e0..6c2fa9af95b9 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -15,6 +15,7
@@
 import logging
 import base64
 import asyncio
+import re
 
 from azure.core.credentials import TokenCredential, AccessToken
 
@@ -58,7 +59,7 @@
     Tuple,
     Set,
     get_origin,
-    Union,
+    get_args,
 )
 
 logger = logging.getLogger(__name__)
@@ -241,26 +242,33 @@ def get_token(self) -> AccessToken:
     "datetime": "string",  # Use format "date-time"
     "date": "string",  # Use format "date"
     "UUID": "string",  # Use format "uuid"
+    "list": "array",
+    "dict": "object",
 }
 
 
-def _map_type(annotation) -> str:
+def _map_type(annotation) -> Dict[str, Any]:
     if annotation == inspect.Parameter.empty:
-        return "string"  # Default type if annotation is missing
+        return {"type": "string"}  # Default type if annotation is missing
 
     origin = get_origin(annotation)
 
     if origin in {list, List}:
-        return "array"
+        args = get_args(annotation)
+        item_type = args[0] if args else str
+        return {
+            "type": "array",
+            "items": {"type": type_map.get(item_type.__name__, "string")}
+        }
     elif origin in {dict, Dict}:
-        return "object"
+        return {"type": "object"}
    elif hasattr(annotation, "__name__"):
-        return type_map.get(annotation.__name__, "string")
+        return {"type": type_map.get(annotation.__name__, "string")}
     elif isinstance(annotation, type):
-        return type_map.get(annotation.__name__, "string")
+        return {"type": type_map.get(annotation.__name__, "string")}
 
-    return "string"  # Fallback to "string" if type is unrecognized
+    return {"type": "string"}  # Fallback to "string" if type is unrecognized
 
 
 class Tool(ABC):
@@ -305,30 +313,63 @@ def __init__(self, functions: Set[Callable[..., Any]]):
         self._definitions = self._build_function_definitions(self._functions)
 
     def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]:
-        func_dict = {func.__name__: func for func in functions}
-        return func_dict
+        return {func.__name__: func for func in functions}
 
     def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]:
         specs = []
+        # Flexible regex to capture ':param <name> (<type>): <description>'
+        param_pattern = re.compile(
+            r"""
+            ^\s*                                  # Optional leading whitespace
+            :param                                # Literal ':param'
+            \s+                                   # At least one whitespace character
+            (?P<name>[^:\s\(\)]+)                 # Parameter name (no spaces, colons, or parentheses)
+            (?:\s*\(\s*(?P<type>[^)]+?)\s*\))?    # Optional type in parentheses, allowing internal spaces
+            \s*:\s*                               # Colon ':' surrounded by optional whitespace
+            (?P<description>.+)                   # Description (rest of the line)
+            """,
+            re.VERBOSE
+        )
+
         for name, func in functions.items():
             sig = inspect.signature(func)
             params = sig.parameters
-            docstring = inspect.getdoc(func)
+            docstring = inspect.getdoc(func) or ""
             description = docstring.split("\n")[0] if docstring else "No description"
 
+            param_descs = {}
+            for line in docstring.splitlines():
+                line = line.strip()
+                match = param_pattern.match(line)
+                if match:
+                    groups = match.groupdict()
+                    param_name = groups.get('name')
+                    param_desc = groups.get('description')
+                    param_desc = param_desc.strip() if param_desc else "No description"
+                    param_descs[param_name] = param_desc.strip()
+
             properties = {}
             for param_name, param in params.items():
-                param_type = _map_type(param.annotation)
-                param_description = param.annotation.__doc__ if param.annotation != inspect.Parameter.empty else None
-                properties[param_name] = {"type": param_type, "description": param_description}
+                param_type_info = _map_type(param.annotation)
+                param_description = param_descs.get(param_name, "No description")
+
+                properties[param_name] = {
+                    **param_type_info,
+                    "description": param_description
+                }
 
             function_def = FunctionDefinition(
                 name=name,
                 description=description,
-                parameters={"type": "object", "properties": properties, "required": list(params.keys())},
+                parameters={
+                    "type": "object",
+                    "properties": properties,
+                    "required": list(params.keys())
+                },
             )
             tool_def = FunctionToolDefinition(function=function_def)
             specs.append(tool_def)
+
         return specs
 
     def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, Dict[str, Any]]:
@@ -380,8 +421,11 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
         try:
             return function(**parsed_arguments) if parsed_arguments else function()
         except TypeError as e:
-            logging.error(f"Error executing function '{tool_call.function.name}': {e}")
-            raise
+            error_message = f"Error executing function '{tool_call.function.name}': {e}"
+            logging.error(error_message)
+            # Return the error message as a JSON string to the agent, so it can self-correct the function call
+            return json.dumps({"error": error_message})
+
 
 class AsyncFunctionTool(BaseFunctionTool):
@@ -395,8 +439,10 @@ async def execute(self, tool_call: RequiredFunctionToolCall) -> Any:
             else:
                 return function(**parsed_arguments) if parsed_arguments else function()
         except TypeError as e:
-            logging.error(f"Error executing function '{tool_call.function.name}': {e}")
-            raise
+            error_message = f"Error executing function '{tool_call.function.name}': {e}"
+            logging.error(error_message)
+            # Return the error message as a JSON string to the agent, so it can self-correct the function call
+            return json.dumps({"error": error_message})
 
 
 class FileSearchTool(Tool):
@@ -502,6 +548,7 @@ def resources(self) -> ToolResources:
     def execute(self, tool_call: Any) -> Any:
         pass
 
+
 class BaseToolSet:
     """
     Abstract class for a collection of tools that can be used by an agent.
@@ -600,6 +647,7 @@ def get_tool(self, tool_type: Type[Tool]) -> Tool:
             return tool
         raise ValueError(f"Tool of type {tool_type.__name__} not found.")
 
+
 class ToolSet(BaseToolSet):
     """
    A collection of tools that can be used by a synchronous agent.
@@ -617,7 +665,6 @@ def validate_tool_type(self, tool: Tool) -> None:
                 "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio."
) - def execute_tool_calls(self, tool_calls: List[Any]) -> Any: """ Execute a tool of the specified type with the provided tool calls. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py index d7f40fa718d1..8f3ddd4a778e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py @@ -76,6 +76,7 @@ for tool_call in tool_calls: if isinstance(tool_call, RequiredFunctionToolCall): try: + print(f"Executing tool call: {tool_call}") output = functions.execute(tool_call) tool_outputs.append( { diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py index 25f7be372ef7..ebad651932f6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py @@ -5,7 +5,7 @@ import json import datetime -from typing import Any, Callable, Set +from typing import Any, Callable, Set, Dict, List # These are the user-defined functions that can be called by the agent. @@ -58,9 +58,142 @@ def send_email(recipient: str, subject: str, body: str) -> str: return message_json +def calculate_sum(a: int, b: int) -> str: + """Calculates the sum of two integers. + + :param a (int): First integer. + :rtype: int + :param b (int): Second integer. + :rtype: int + + :return: The sum of the two integers. + :rtype: str + """ + result = a + b + return json.dumps({"result": result}) + + +def convert_temperature(celsius: float) -> str: + """Converts temperature from Celsius to Fahrenheit. + + :param celsius (float): Temperature in Celsius. + :rtype: float + + :return: Temperature in Fahrenheit. + :rtype: str + """ + fahrenheit = (celsius * 9/5) + 32 + return json.dumps({"fahrenheit": fahrenheit}) + + +def toggle_flag(flag: bool) -> str: + """Toggles a boolean flag. + + :param flag (bool): The flag to toggle. + :rtype: bool + + :return: The toggled flag. + :rtype: str + """ + toggled = not flag + return json.dumps({"toggled_flag": toggled}) + + +def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: + """Merges two dictionaries. + + :param dict1 (Dict[str, Any]): First dictionary. + :rtype: dict + :param dict2 (Dict[str, Any]): Second dictionary. + :rtype: dict + + :return: The merged dictionary. + :rtype: str + """ + merged = dict1.copy() + merged.update(dict2) + return json.dumps({"merged_dict": merged}) + + +def get_user_info(user_id: int) -> str: + """Retrieves user information based on user ID. + + :param user_id (int): ID of the user. + :rtype: int + + :return: User information as a JSON string. + :rtype: str + """ + mock_users = { + 1: {"name": "Alice", "email": "alice@example.com"}, + 2: {"name": "Bob", "email": "bob@example.com"}, + 3: {"name": "Charlie", "email": "charlie@example.com"}, + } + user_info = mock_users.get(user_id, {"error": "User not found."}) + return json.dumps({"user_info": user_info}) + + +def longest_word_in_sentences(sentences: List[str]) -> str: + """Finds the longest word in each sentence. + + :param sentences (List[str]): A list of sentences. + :return: A JSON string mapping each sentence to its longest word. 
+    :rtype: str
+    """
+    if not sentences:
+        return json.dumps({"error": "The list of sentences is empty"})
+
+    longest_words = {}
+    for sentence in sentences:
+        # Split sentence into words
+        words = sentence.split()
+        if words:
+            # Find the longest word
+            longest_word = max(words, key=len)
+            longest_words[sentence] = longest_word
+        else:
+            longest_words[sentence] = ""
+
+    return json.dumps({"longest_words": longest_words})
+
+
+# Example Questions for Each Function
+# 1. Fetch Current DateTime
+#    Question: "What is the current date and time?"
+
+# 2. Fetch Weather
+#    Question: "Can you provide the weather information for New York?"
+
+# 3. Send Email
+#    Question: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'"
+
+# 4. Calculate Sum
+#    Question: "What is the sum of 45 and 55?"
+
+# 5. Convert Temperature
+#    Question: "Convert 25 degrees Celsius to Fahrenheit."
+
+# 6. Toggle Flag
+#    Question: "Toggle the flag True."
+
+# 7. Merge Dictionaries
+#    Question: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}."
+
+# 8. Get User Info
+#    Question: "Retrieve user information for user ID 1."
+
+# 9. Longest Word in Sentences
+#    Question: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']."
+
 # Statically defined user functions for fast reference
 user_functions: Set[Callable[..., Any]] = {
     fetch_current_datetime,
     fetch_weather,
     send_email,
+    calculate_sum,
+    convert_temperature,
+    toggle_flag,
+    merge_dicts,
+    get_user_info,
+    longest_word_in_sentences,
 }

From 40f27f1f8ab096fc3d11908b06abec902134c2fa Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 31 Oct 2024 18:43:09 -0700
Subject: [PATCH 071/138] Connections & Inference test updates to support new
 unified models

---
 .../azure_ai_projects_tests.env               | 49 ++++++++++++++-----
 ...pletions_with_azure_ai_inference_client.py |  1 -
 .../tests/connections/connection_test_base.py |  6 +--
 .../tests/connections/test_connections.py     | 28 +++++------
 .../connections/test_connections_async.py     | 26 +++++-----
 .../tests/inference/inference_test_base.py    |  7 +--
 .../tests/inference/test_inference.py         |  7 ++-
 .../tests/inference/test_inference_async.py   |  7 ++-
 .../tests/telemetry/telemetry_test_base.py    |  2 +-
 9 files changed, 82 insertions(+), 51 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
index c2cf5fdd5914..b26cf7bd11b0 100644
--- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
+++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
@@ -6,24 +6,51 @@
 AZURE_TEST_RUN_LIVE=true
 AZURE_SKIP_LIVE_RECORDING=true
 
-# To run Connection tests:
+########################################################################################################################
+# Connection tests
+#
+# To run connection tests you need an AI Studio project with
+# - At least one AIServices resource connected
+# - At least one Azure OpenAI resource connected
+# and you will need to define the 5 environment variables below.
+# Ideally you have more than one AIServices and Azure OpenAI resource connected,
+# so that you can set a connection name that is different from the default connection name.
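+# For example (hypothetical values):
+#   AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AOAI_CONNECTION_NAME=my-aoai-connection
+#   AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AISERVICES_CONNECTION_NAME=my-aiservices-connection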
+#
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING=
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME=
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AISERVICES_CONNECTION_NAME=
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AOAI_CONNECTION_NAME=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME}
+AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AISERVICES_CONNECTION_NAME=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AISERVICES_CONNECTION_NAME}
+
+
+########################################################################################################################
+# Inference tests
+#
+# To run inference tests you need an AI Studio project with
+# - A default AIServices resource with at least one chat-completions model deployed (from OpenAI or non-OpenAI)
+# - A default Azure OpenAI resource connected with at least one chat-completions OpenAI model deployed
+# Populate the Azure OpenAI api-version and model deployment names below.
+# Note: See Azure OpenAI api-versions here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs
+#
+AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING}
+AZURE_AI_PROJECTS_INFERENCE_TESTS_AOAI_API_VERSION=
+AZURE_AI_PROJECTS_INFERENCE_TESTS_AOAI_MODEL_DEPLOYMENT_NAME=
+AZURE_AI_PROJECTS_INFERENCE_TESTS_AISERVICES_MODEL_DEPLOYMENT_NAME=
+
+
+########################################################################################################################
+# Telemetry tests
+#
+AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING}
+
+
+########################################################################################################################
+# Agents tests
+#
+
+
+########################################################################################################################
+# Evaluation tests
+#
diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py
index 693aeef4706d..9e3ef43786a4 100644
--- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py
+++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py
@@ -11,7 +11,6 @@
 
 USAGE:
     python sample_get_chat_completions_client.py
-    python sample_get_chat_completions_client.py
 
     Before running the sample:
 
diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py
index 7dd4c40a7219..6072048658da 100644
--- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py
+++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py
@@ -15,11 +15,11 @@
 servicePreparerConnectionsTests = functools.partial(
     EnvironmentVariableLoader,
     "azure_ai_projects_connections_tests",
-
azure_ai_projects_connections_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + azure_ai_projects_connections_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", azure_ai_projects_connections_tests_default_aoai_connection_name="default-aoai-connection-name", - azure_ai_projects_connections_tests_default_serverless_connection_name="default-serverless-connection-name", + azure_ai_projects_connections_tests_default_aiservices_connection_name="default-aiservices-connection-name", azure_ai_projects_connections_tests_aoai_connection_name="aoai-connection-name", - azure_ai_projects_connections_tests_serverless_connection_name="serverless-connection-name", + azure_ai_projects_connections_tests_aiservices_connection_name="aiservices-connection-name", ) # Set to True to enable SDK logging diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 470d32779f33..d4a6f6cfdf14 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -16,7 +16,7 @@ class TestConnections(ConnectionsTestBase): def test_connections_get(self, **kwargs): aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") - serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_serverless_connection_name") + aiservices_connection = kwargs.pop("azure_ai_projects_connections_tests_aiservices_connection_name") with self.get_sync_client(**kwargs) as project_client: @@ -28,8 +28,6 @@ def test_connections_get(self, **kwargs): connection_name="Some non-existing name", with_credentials=True ) == None - return - connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) ConnectionsTestBase.validate_connection( @@ -48,22 +46,22 @@ def test_connections_get(self, **kwargs): expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) - connection = project_client.connections.get(connection_name=serverless_connection, with_credentials=False) + connection = project_client.connections.get(connection_name=aiservices_connection, with_credentials=False) print(connection) ConnectionsTestBase.validate_connection( connection, False, - expected_connection_name=serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) - connection = project_client.connections.get(connection_name=serverless_connection, with_credentials=True) + connection = project_client.connections.get(connection_name=aiservices_connection, with_credentials=True) print(connection) ConnectionsTestBase.validate_connection( connection, True, - expected_connection_name=serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @servicePreparerConnectionsTests() @@ -72,7 +70,7 @@ def test_connections_get_default(self, **kwargs): default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") default_serverless_connection = kwargs.pop( - "azure_ai_projects_connections_tests_default_serverless_connection_name" + "azure_ai_projects_connections_tests_default_aiservices_connection_name" ) 
with self.get_sync_client(**kwargs) as project_client: @@ -108,25 +106,25 @@ def test_connections_get_default(self, **kwargs): ) connection = project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=False + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=False ) print(connection) ConnectionsTestBase.validate_connection( connection, False, expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) connection = project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True ) print(connection) ConnectionsTestBase.validate_connection( connection, True, expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @servicePreparerConnectionsTests() @@ -151,7 +149,7 @@ def test_connections_list(self, **kwargs): ConnectionsTestBase.validate_connection(connection, False) connections = project_client.connections.list( - connection_type=ConnectionType.SERVERLESS, + connection_type=ConnectionType.AZURE_AI_SERVICES, ) count_serverless = len(connections) print(f"====> Listing of all Azure AI Services connections (found {count_serverless}):") diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 7246ccefbfab..32b65a09edf1 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -15,7 +15,7 @@ class TestConnectionsAsync(ConnectionsTestBase): @recorded_by_proxy_async async def test_connections_get_async(self, **kwargs): aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") - serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_serverless_connection_name") + aiservices_connection = kwargs.pop("azure_ai_projects_connections_tests_aiservices_connection_name") async with self.get_async_client(**kwargs) as project_client: @@ -46,25 +46,25 @@ async def test_connections_get_async(self, **kwargs): ) connection = await project_client.connections.get( - connection_name=serverless_connection, with_credentials=False + connection_name=aiservices_connection, with_credentials=False ) print(connection) ConnectionsTestBase.validate_connection( connection, False, - expected_connection_name=serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) connection = await project_client.connections.get( - connection_name=serverless_connection, with_credentials=True + connection_name=aiservices_connection, with_credentials=True ) print(connection) ConnectionsTestBase.validate_connection( connection, True, - expected_connection_name=serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @servicePreparerConnectionsTests() @@ -73,7 +73,7 @@ async def test_connections_get_default_async(self, **kwargs): default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") 
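These tests all lean on the same `EnvironmentVariableLoader` plumbing shown in `connection_test_base.py` above; a reduced sketch of that pattern, with an illustrative variable name (playback runs substitute the sanitized default, live runs read the real environment variable, and the decorator forwards the value via `**kwargs`):

```python
import functools
from devtools_testutils import EnvironmentVariableLoader

# Illustrative preparer: one sanitized placeholder for recordings.
servicePreparer = functools.partial(
    EnvironmentVariableLoader,
    "azure_ai_projects_connections_tests",
    azure_ai_projects_connections_tests_aoai_connection_name="aoai-connection-name",
)

class TestConnectionsSketch:
    @servicePreparer()
    def test_get(self, **kwargs):
        # The test pops the injected value, exactly as the tests above do.
        aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name")
        assert aoai_connection
```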
default_serverless_connection = kwargs.pop( - "azure_ai_projects_connections_tests_default_serverless_connection_name" + "azure_ai_projects_connections_tests_default_aiservices_connection_name" ) async with self.get_async_client(**kwargs) as project_client: @@ -109,25 +109,25 @@ async def test_connections_get_default_async(self, **kwargs): ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=False + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=False ) print(connection) ConnectionsTestBase.validate_connection( connection, False, expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True ) print(connection) ConnectionsTestBase.validate_connection( connection, True, expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.SERVERLESS, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @servicePreparerConnectionsTests() @@ -152,7 +152,7 @@ async def test_connections_list_async(self, **kwargs): ConnectionsTestBase.validate_connection(connection, False) connections = await project_client.connections.list( - connection_type=ConnectionType.SERVERLESS, + connection_type=ConnectionType.AZURE_AI_SERVICES, ) count_serverless = len(connections) print(f"====> Listing of all Azure AI Services connections (found {count_serverless}):") diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index 9f644715e861..4c3f3687168f 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -12,11 +12,12 @@ servicePreparerInferenceTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_inference_tests", - azure_ai_projects_inference_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", - azure_ai_projects_inference_tests_model_deployment_name="model-deployment-name", + azure_ai_projects_inference_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", + azure_ai_projects_inference_tests_aoai_api_version="aoai-api-version", + azure_ai_projects_inference_tests_aoai_model_deployment_name="aoai-model-deployment-name", + azure_ai_projects_inference_tests_aiservices_model_deployment_name="aiservices-model-deployment-name", ) - # Set to True to enable SDK logging LOGGING_ENABLED = False diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 5403c75a96b4..38163e0070e6 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -14,11 +14,12 @@ class TestInference(InferenceTestBase): @servicePreparerInferenceTests() @recorded_by_proxy def test_inference_get_azure_openai_client(self, **kwargs): - model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") + api_version = kwargs.pop("azure_ai_projects_inference_tests_aoai_api_version") + model = 
kwargs.pop("azure_ai_projects_inference_tests_aoai_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs with project_client.inference.get_azure_openai_client( - api_version="2024-10-01-preview" + api_version=api_version ) as azure_openai_client: response = azure_openai_client.chat.completions.create( messages=[ @@ -36,9 +37,11 @@ def test_inference_get_azure_openai_client(self, **kwargs): @servicePreparerInferenceTests() @recorded_by_proxy def test_inference_get_chat_completions_client(self, **kwargs): + model = kwargs.pop("azure_ai_projects_inference_tests_aiservices_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: with project_client.inference.get_chat_completions_client() as azure_ai_inference_client: response = azure_ai_inference_client.complete( + model=model, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index df1988c411e8..3e0507edf2a8 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -14,11 +14,12 @@ class TestInferenceAsync(InferenceTestBase): @servicePreparerInferenceTests() @recorded_by_proxy_async async def test_inference_get_azure_openai_client_async(self, **kwargs): - model = kwargs.pop("azure_ai_projects_inference_tests_model_deployment_name") + api_version = kwargs.pop("azure_ai_projects_inference_tests_aoai_api_version") + model = kwargs.pop("azure_ai_projects_inference_tests_aoai_model_deployment_name") async with self.get_async_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs async with await project_client.inference.get_azure_openai_client( - api_version="2024-10-01-preview" + api_version=api_version ) as azure_openai_client: response = await azure_openai_client.chat.completions.create( messages=[ @@ -36,9 +37,11 @@ async def test_inference_get_azure_openai_client_async(self, **kwargs): @servicePreparerInferenceTests() @recorded_by_proxy_async async def test_inference_get_chat_completions_client_async(self, **kwargs): + model = kwargs.pop("azure_ai_projects_inference_tests_aiservices_model_deployment_name") async with self.get_async_client(**kwargs) as project_client: async with await project_client.inference.get_chat_completions_client() as azure_ai_inference_client: response = await azure_ai_inference_client.complete( + model=model, messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py index c4ce9d8de412..607a1aec85f9 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py @@ -17,7 +17,7 @@ servicePreparerTelemetryTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_telemetry_test", - azure_ai_projects_telemetry_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;hub-name", + 
azure_ai_projects_telemetry_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", ) # Set to True to enable SDK logging From a73fd8beda20191ce83bb9e9b499fda0f64e8d78 Mon Sep 17 00:00:00 2001 From: Liudmila Molkova Date: Thu, 31 Oct 2024 19:38:42 -0700 Subject: [PATCH 072/138] Improve tracing samples and add docs on telemetry.enable (#38257) * Improve tracing samples and add docs on telemetry.enable * fix typo --- .../azure/ai/projects/operations/_patch.py | 92 +++++++++++-------- ...basics_async_with_azure_monitor_tracing.py | 3 +- ...gents_basics_async_with_console_tracing.py | 11 ++- ...gents_basics_with_azure_monitor_tracing.py | 4 +- ...mple_agents_basics_with_console_tracing.py | 10 +- ...ts_functions_with_azure_monitor_tracing.py | 3 +- ...e_agents_functions_with_console_tracing.py | 10 +- ...eventhandler_with_azure_monitor_tracing.py | 3 +- ...tream_eventhandler_with_console_tracing.py | 11 ++- ...erence_client_and_azure_monitor_tracing.py | 8 +- ...ai_inference_client_and_console_tracing.py | 12 ++- ...openai_client_and_azure_monitor_tracing.py | 10 +- ...azure_openai_client_and_console_tracing.py | 14 ++- 13 files changed, 131 insertions(+), 60 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 3838353a253d..98540cafc8a4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -63,7 +63,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": """ kwargs.setdefault("merge_span", True) - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") @@ -132,7 +132,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": """ kwargs.setdefault("merge_span", True) - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") @@ -358,15 +358,17 @@ def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) # Internal helper function to enable tracing, used by both sync and async clients -def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: - """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. +def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> None: + """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. :keyword destination: `sys.stdout` for tracing to console output, or a string holding the - endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required. - :paramtype destination: Union[TextIOWrapper, str] + OpenTelemetry protocol (OTLP) endpoint. + If not provided, this method enables instrumentation, but does not configure OpenTelemetry + SDK to export traces. 
+ :paramtype destination: Union[TextIOWrapper, str, None] """ if isinstance(destination, str): - # `destination`` is the OTLP collector URL + # `destination` is the OTLP endpoint # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage try: from opentelemetry import trace @@ -374,17 +376,14 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: from opentelemetry.sdk.trace.export import SimpleSpanProcessor except ModuleNotFoundError as _: raise ModuleNotFoundError( - "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-sdk'" + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" ) try: - from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter except ModuleNotFoundError as _: raise ModuleNotFoundError( - "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-http'" + "OpenTelemetry OTLP exporter is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" ) - from azure.core.settings import settings - - settings.tracing_implementation = "opentelemetry" trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination))) @@ -397,19 +396,23 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter except ModuleNotFoundError as _: raise ModuleNotFoundError( - "OpenTelemetry package is not installed. Please install it using 'pip install opentelemetry-sdk'" + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" ) - from azure.core.settings import settings - - settings.tracing_implementation = "opentelemetry" trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) else: raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIOWrapper`") - else: - raise ValueError("Destination must be a string or a `TextIOWrapper` object") # Silently try to load a set of relevant Instrumentors + try: + from azure.core.settings import settings + settings.tracing_implementation = "opentelemetry" + _ = settings.tracing_implementation() + except ModuleNotFoundError as _: + logger.warning( + "Azure SDK tracing plugin is not installed. Please install it using 'pip install azure-core-tracing-opentelemetry'" + ) + try: from azure.ai.inference.tracing import AIInferenceInstrumentor @@ -433,7 +436,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str], **kwargs) -> None: ) try: - from opentelemetry.instrumentation.openai import OpenAIInstrumentor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor OpenAIInstrumentor().instrument() except ModuleNotFoundError as _: @@ -488,12 +491,29 @@ def get_connection_string(self) -> None: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIOWrapper, str], **kwargs) -> None: - """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. 
+ def enable(self, *, destination: Union[TextIOWrapper, str, None] = None, **kwargs) -> None: + """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. + + The following instrumentations are enabled (when corresponding packages are installed): + + - Azure AI Inference (`azure-ai-inference`) + - Azure AI Projects (`azure-ai-projects`) + - OpenAI (`opentelemetry-instrumentation-openai-v2`) + - Langchain (`opentelemetry-instrumentation-langchain`) - :keyword destination: `sys.stdout` for tracing to console output, or a string holding the - endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required. - :paramtype destination: Union[TextIOWrapper, str] + The recording of prompt and completion messages is disabled by default. To enable it, set the + `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to `true`. + + When destination is provided, the method configures OpenTelemetry SDK to export traces to + stdout or OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local + development only. For production use, make sure to configure OpenTelemetry SDK directly. + + :keyword destination: Recommended for local testing only. Set it to `sys.stdout` for + tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) + endpoint such as "http://localhost:4317". + If not provided, the method enables instrumentations, but does not configure OpenTelemetry + SDK to export traces. + :paramtype destination: Union[TextIOWrapper, str, None] """ _enable_telemetry(destination=destination, **kwargs) @@ -689,9 +709,9 @@ def create_agent( :return: An Agent object. :raises: HttpResponseError for HTTP errors. """ - + self._validate_tools_and_tool_resources(tools, tool_resources) - + if body is not _Unset: if isinstance(body, io.IOBase): return super().create_agent(body=body, content_type=content_type, **kwargs) @@ -715,7 +735,7 @@ def create_agent( metadata=metadata, **kwargs, ) - + @overload def update_agent( self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any @@ -801,7 +821,7 @@ def update_agent( :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - + @overload def update_agent( self, @@ -863,7 +883,7 @@ def update_agent( :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - + @overload def update_agent( @@ -895,7 +915,7 @@ def update_agent( instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.ToolSet] = None, + toolset: Optional[_models.ToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -955,7 +975,7 @@ def update_agent( :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) - + if body is not _Unset: if isinstance(body, io.IOBase): return super().update_agent(body=body, content_type=content_type, **kwargs) @@ -980,18 +1000,18 @@ def update_agent( metadata=metadata, **kwargs, ) - + def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]): if tool_resources is None: return if tools is None: tools = [] - + if tool_resources.file_search is not None and not any(isinstance(tool, _models.FileSearchToolDefinition) for tool in 
tools): raise ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided") if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools): raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided") - + def _get_toolset(self) -> Optional[_models.ToolSet]: """ Get the toolset for the agent. @@ -2534,7 +2554,7 @@ def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str file.write(chunk) else: raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - + logger.debug(f"File '{sanitized_file_name}' saved successfully at '{target_file_path}'.") except (ValueError, RuntimeError, TypeError, IOError) as e: diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py index 8520028205e4..0de29ddd2a32 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -16,8 +16,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http - pip install azure.monitor.opentelemetry + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py index 945d5e7018c2..51723d56917b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py @@ -15,7 +15,13 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. 
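A hedged sketch of the calling convention that satisfies the `_validate_tools_and_tool_resources` check shown above: definitions and resources should come from the same tool helper. The vector store ID is a placeholder, and `project_client` is assumed to have been created as in the samples:

```python
from azure.ai.projects.models import FileSearchTool

# "vs_placeholder" stands in for a real vector store ID.
file_search = FileSearchTool(vector_store_ids=["vs_placeholder"])

# Matching definitions and resources pass validation; supplying
# tool_resources.file_search without a FileSearchToolDefinition in tools
# raises the ValueError shown earlier.
agent = project_client.agents.create_agent(
    model="gpt-4o",
    name="my-agent",
    instructions="You are a helpful agent",
    tools=file_search.definitions,
    tool_resources=file_search.resources,
)
```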
@@ -27,7 +33,6 @@ import sys from azure.ai.projects.aio import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.tracing.agents import AIAgentsInstrumentor from opentelemetry import trace import os @@ -46,6 +51,8 @@ async def main(): ) # Enable console tracing + # or, if you have local OTLP endpoint running, change it to + # project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) async with project_client: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py index c2c1033a2ed4..f1f7e34cade3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py @@ -16,8 +16,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http - pip install azure.monitor.opentelemetry + pip install azure-ai-projects azure-identity azure-monitor-opentelemetry Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. @@ -41,6 +40,7 @@ ) # Enable Azure Monitor tracing + application_insights_connection_string = project_client.telemetry.get_connection_string() if not application_insights_connection_string: print("Application Insights was not enabled for this project.") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py index 6138cf1765d5..5466568be70b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py @@ -15,7 +15,13 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. 
@@ -38,6 +44,8 @@ ) # Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) scenario = os.path.basename(__file__) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index 48eeb0b8749e..c3fa5fb200a3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -16,8 +16,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http - pip install azure.monitor.opentelemetry + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index 2b3169e856d1..b871f2297b83 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -15,7 +15,13 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. @@ -42,6 +48,8 @@ ) # Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) scenario = os.path.basename(__file__) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py index 63ffcf7b793a..bcd2428b9959 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -16,8 +16,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http - pip install azure.monitor.opentelemetry + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-monitor-opentelemetry Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. 
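The tracing samples updated in this commit all reduce to the same switch. A compact sketch of the three `telemetry.enable` modes described in the new docstring — the localhost endpoint is an assumption about a locally running OTLP collector, and `project_client` is assumed to be an `AIProjectClient` created as in the samples:

```python
import sys

# 1) Local development: print spans to the console.
project_client.telemetry.enable(destination=sys.stdout)

# 2) Local development: export spans to an OTLP gRPC endpoint instead,
#    e.g. a standalone Aspire dashboard.
# project_client.telemetry.enable(destination="http://localhost:4317")

# 3) Production: instrument the libraries only and configure the
#    OpenTelemetry SDK (providers, processors, exporters) yourself.
# project_client.telemetry.enable()
```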
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py index d98b91021e46..ab5b8f89e3a6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py @@ -15,13 +15,18 @@ Before running the sample: - pip install azure.ai.projects azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. - """ import os,sys @@ -79,6 +84,8 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") # Enable console tracing +# or, if you have local OTLP endpoint running, change it to +# project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) scenario = os.path.basename(__file__) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py index 6e26adea5817..005325ec02cc 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py @@ -5,7 +5,7 @@ """ DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated + Given an AIProjectClient, this sample demonstrates how to get an authenticated ChatCompletionsClient from the azure.ai.inference package. The client is already instrumented to upload traces to Azure Monitor. View the results in the "Tracing" tab in your Azure AI Studio project page. @@ -15,7 +15,7 @@ Before running the sample: - pip install azure-ai-projects azure-ai-inference azure-identity azure.monitor.opentelemetry + pip install azure-ai-projects azure-ai-inference azure-identity azure-monitor-opentelemetry Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
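As the sample headers warn, chat message content is not recorded by default. A short sketch of opting in before tracing is enabled (the variable must be set in the process environment before spans are produced; `project_client` is assumed from the samples above):

```python
import os
import sys

# Opt in to recording message content in spans. This may capture personal
# data, so it is disabled unless explicitly requested.
os.environ["AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED"] = "true"

project_client.telemetry.enable(destination=sys.stdout)
```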
@@ -43,6 +43,10 @@ print("Application Insights was not enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Studio project page.") exit() + + # Enable additional instrumentations for openai and langchain + # which are not included by Azure Monitor out of the box + project_client.telemetry.enable() configure_azure_monitor(connection_string=application_insights_connection_string) # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index 9c86bc51a717..6dcb00ec380a 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -5,7 +5,7 @@ """ DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated + Given an AIProjectClient, this sample demonstrates how to get an authenticated ChatCompletionsClient from the azure.ai.inference package. The client is already instrumented with console OpenTelemetry tracing. @@ -14,7 +14,13 @@ Before running the sample: - pip install azure-ai-projects azure-ai-inference azure-identity opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects azure-ai-inference azure-identity opentelemetry-sdk azure-core-tracing-opentelemetry + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. @@ -37,6 +43,8 @@ ) as project_client: # Enable console tracing + # or, if you have local OTLP endpoint running, change it to + # project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py index f9c49842ed93..431344daec56 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py @@ -5,7 +5,7 @@ """ DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated + Given an AIProjectClient, this sample demonstrates how to get an authenticated AzureOpenAI client from the openai package. The client is already instrumented to upload traces to Azure Monitor. View the results in the "Tracing" tab in your Azure AI Studio project page. 
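The Azure Monitor variants in this commit share a three-step shape: read the project's Application Insights connection string, call `telemetry.enable()` with no destination to add the openai/langchain instrumentations, then let `configure_azure_monitor` own the exporter. Condensed from the sample code above (`project_client` assumed):

```python
from azure.monitor.opentelemetry import configure_azure_monitor

application_insights_connection_string = project_client.telemetry.get_connection_string()
if not application_insights_connection_string:
    raise RuntimeError("Enable Application Insights via the 'Tracing' tab of the AI Studio project page.")

# Instrument Azure AI, OpenAI and Langchain libraries; no exporter is set up here.
project_client.telemetry.enable()

# Azure Monitor configures the OpenTelemetry SDK and uploads the traces.
configure_azure_monitor(connection_string=application_insights_connection_string)
```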
@@ -15,12 +15,12 @@ Before running the sample: - pip install azure-ai-projects openai azure.monitor.opentelemetry opentelemetry-instrumentation-openai + pip install azure-ai-projects openai azure-monitor-opentelemetry opentelemetry-instrumentation-openai-v2 Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + * OTEL_INSTRUMENTATION_OPENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: @@ -45,6 +45,10 @@ print("Application Insights was not enabled for this project.") print("Enable it via the 'Tracing' tab in your AI Studio project page.") exit() + + # Enable additional instrumentations for openai and langchain + # which are not included by Azure Monitor out of the box + project_client.telemetry.enable() configure_azure_monitor(connection_string=application_insights_connection_string) # Get an authenticated OpenAI client for your default Azure OpenAI connection: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py index 77d395ce63f0..a2b2b55af982 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py @@ -5,7 +5,7 @@ """ DESCRIPTION: - Given an AIProjectClient, this sample demonstrates how to get an authenticated + Given an AIProjectClient, this sample demonstrates how to get an authenticated AzureOpenAI client from the openai package. The client is already instrumented with console OpenTelemetry tracing. @@ -14,12 +14,18 @@ Before running the sample: - pip install azure-ai-projects openai opentelemetry.instrumentation.openai opentelemetry-sdk opentelemetry-exporter-otlp-proto-http + pip install azure-ai-projects openai opentelemetry-sdk opentelemetry-instrumentation-openai-v2 + + If you want to export telemetry to OTLP endpoint (such as Aspire dashboard + https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash) + install: + + pip install opentelemetry-exporter-otlp-proto-grpc Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. - * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat + * OTEL_INSTRUMENTATION_OPENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. Update the Azure OpenAI api-version as needed (see `api_version=` below). 
Values can be found here: @@ -39,6 +45,8 @@ ) as project_client: # Enable console tracing + # or, if you have local OTLP endpoint running, change it to + # project_client.telemetry.enable(destination="http://localhost:4317") project_client.telemetry.enable(destination=sys.stdout) # Get an authenticated OpenAI client for your default Azure OpenAI connection: From 59a3228b2d19ee10bc7d1d4f899bd51e830b1b63 Mon Sep 17 00:00:00 2001 From: Jarno Hakulinen Date: Fri, 1 Nov 2024 07:36:11 -0800 Subject: [PATCH 073/138] Jhakulin/user functions optional (#38261) * update to get file content * agents async updates * update * fix await/pylance issue * fix pylance error on AsyncToolSet * return empty set if no tool resources for code interpreter * added async toolset sample * updates to user functions * update * fix the array type for parameters * support for Optional * update * one more example with list of dicts --- .../azure/ai/projects/models/_patch.py | 50 +++++++++++++---- .../samples/agents/user_functions.py | 56 ++++++++++++++----- 2 files changed, 80 insertions(+), 26 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 6c2fa9af95b9..cebbfd8a1b03 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -60,6 +60,7 @@ Set, get_origin, get_args, + Union, ) logger = logging.getLogger(__name__) @@ -237,18 +238,13 @@ def get_token(self) -> AccessToken: "int": "integer", "float": "number", "bool": "boolean", - "bytes": "string", # Typically encoded as base64-encoded strings in JSON "NoneType": "null", - "datetime": "string", # Use format "date-time" - "date": "string", # Use format "date" - "UUID": "string", # Use format "uuid" "list": "array", "dict": "object", } def _map_type(annotation) -> Dict[str, Any]: - if annotation == inspect.Parameter.empty: return {"type": "string"} # Default type if annotation is missing @@ -259,18 +255,43 @@ def _map_type(annotation) -> Dict[str, Any]: item_type = args[0] if args else str return { "type": "array", - "items": {"type": type_map.get(item_type.__name__, "string")} + "items": _map_type(item_type) } elif origin in {dict, Dict}: return {"type": "object"} - elif hasattr(annotation, "__name__"): - return {"type": type_map.get(annotation.__name__, "string")} + elif origin is Union: + args = get_args(annotation) + # If Union contains None, it is an optional parameter + if type(None) in args: + # If Union contains only one non-None type, it is a nullable parameter + non_none_args = [arg for arg in args if arg is not type(None)] + if len(non_none_args) == 1: + schema = _map_type(non_none_args[0]) + if "type" in schema: + if isinstance(schema["type"], str): + schema["type"] = [schema["type"], "null"] + elif "null" not in schema["type"]: + schema["type"].append("null") + else: + schema["type"] = ["null"] + return schema + # If Union contains multiple types, it is a oneOf parameter + return {"oneOf": [_map_type(arg) for arg in args]} elif isinstance(annotation, type): - return {"type": type_map.get(annotation.__name__, "string")} + schema_type = type_map.get(annotation.__name__, "string") + return {"type": schema_type} return {"type": "string"} # Fallback to "string" if type is unrecognized +def is_optional(annotation) -> bool: + origin = get_origin(annotation) + if origin is Union: + args = get_args(annotation) + return type(None) in args + return False + + class Tool(ABC): """ An 
abstract class representing a tool that can be used by an agent. @@ -297,7 +318,8 @@ def execute(self, tool_call: Any) -> Any: :return: The output of the tool operations. """ pass - + + class BaseFunctionTool(Tool): """ A tool that executes user-defined functions. @@ -349,6 +371,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef param_descs[param_name] = param_desc.strip() properties = {} + required = [] for param_name, param in params.items(): param_type_info = _map_type(param.annotation) param_description = param_descs.get(param_name, "No description") @@ -358,13 +381,17 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef "description": param_description } + # If the parameter has no default value and is not optional, add it to the required list + if param.default is inspect.Parameter.empty and not is_optional(param.annotation): + required.append(param_name) + function_def = FunctionDefinition( name=name, description=description, parameters={ "type": "object", "properties": properties, - "required": list(params.keys()) + "required": required }, ) tool_def = FunctionToolDefinition(function=function_def) @@ -427,7 +454,6 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any: return json.dumps({"error": error_message}) - class AsyncFunctionTool(BaseFunctionTool): async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py index ebad651932f6..27304bf1a90c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py @@ -5,20 +5,28 @@ import json import datetime -from typing import Any, Callable, Set, Dict, List +from typing import Any, Callable, Set, Dict, List, Optional # These are the user-defined functions that can be called by the agent. -def fetch_current_datetime() -> str: +def fetch_current_datetime(format: Optional[str] = None) -> str: """ - Get the current time as a JSON string. + Get the current time as a JSON string, optionally formatted. + :param format (Optional[str]): The format in which to return the current time. Defaults to None, which uses a standard format. :return: The current time in JSON format. :rtype: str """ current_time = datetime.datetime.now() - time_json = json.dumps({"current_time": current_time.strftime("%Y-%m-%d %H:%M:%S")}) + + # Use the provided format if available, else use a default format + if format: + time_format = format + else: + time_format = "%Y-%m-%d %H:%M:%S" + + time_json = json.dumps({"current_time": current_time.strftime(time_format)}) return time_json @@ -157,33 +165,52 @@ def longest_word_in_sentences(sentences: List[str]) -> str: return json.dumps({"longest_words": longest_words}) -# Example Questions for Each Function +def process_records(records: List[Dict[str, int]]) -> str: + """ + Process a list of records, where each record is a dictionary with string keys and integer values. + + :param records: A list containing dictionaries that map strings to integers. + :return: A list of sums of the integer values in each record. + """ + sums = [] + for record in records: + # Sum up all the values in each dictionary and append the result to the sums list + total = sum(record.values()) + sums.append(total) + return json.dumps({"sums": sums}) + + +# Example User Input for Each Function # 1. Fetch Current DateTime -# Question: "What is the current date and time?" 
+# User Input: "What is the current date and time?" +# User Input: "What is the current date and time in '%Y-%m-%d %H:%M:%S' format?" # 2. Fetch Weather -# Question: "Can you provide the weather information for New York?" +# User Input: "Can you provide the weather information for New York?" # 3. Send Email -# Question: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" +# User Input: "Send an email to john.doe@example.com with the subject 'Meeting Reminder' and body 'Don't forget our meeting at 3 PM.'" # 4. Calculate Sum -# Question: "What is the sum of 45 and 55?" +# User Input: "What is the sum of 45 and 55?" # 5. Convert Temperature -# Question: "Convert 25 degrees Celsius to Fahrenheit." +# User Input: "Convert 25 degrees Celsius to Fahrenheit." # 6. Toggle Flag -# Question: "Toggle the flag True." +# User Input: "Toggle the flag True." # 7. Merge Dictionaries -# Question: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." +# User Input: "Merge these two dictionaries: {'name': 'Alice'} and {'age': 30}." # 8. Get User Info -# Question: "Retrieve user information for user ID 1." +# User Input: "Retrieve user information for user ID 1." # 9. Longest Word in Sentences -# Question: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." +# User Input: "Find the longest word in each of these sentences: ['The quick brown fox jumps over the lazy dog', 'Python is an amazing programming language', 'Azure AI capabilities are impressive']." + +# 10. Process Records +# User Input: "Process the following records: [{'a': 10, 'b': 20}, {'x': 5, 'y': 15, 'z': 25}, {'m': 30}]." # Statically defined user functions for fast reference user_functions: Set[Callable[..., Any]] = { @@ -196,4 +223,5 @@ def longest_word_in_sentences(sentences: List[str]) -> str: merge_dicts, get_user_info, longest_word_in_sentences, + process_records, } From 99b5b9ff4139f831dc983ad10cfc8f0ffa192629 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Mon, 4 Nov 2024 09:37:30 -0600 Subject: [PATCH 074/138] adding content recording control to agents instrumentor api (#38268) Co-authored-by: Marko Hietala --- .../agents/_ai_agents_instrumentor.py | 87 +++++++++++++++++-- 1 file changed, 78 insertions(+), 9 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index ce345cf0b883..b38b72c85c76 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -71,12 +71,27 @@ def __init__(self): # and have a parameter that specifies the version to use. self._impl = _AIAgentsInstrumentorPreview() - def instrument(self) -> None: + def instrument(self, enable_content_recording: bool = None) -> None: """ Enable trace instrumentation for AI Agents. + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. 
If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. Please note that successive calls + to instrument will always apply the content recording value provided with the most + recent call to instrument (including applying the environment variable if no value is + provided and defaulting to false if the environment variable is not found), even if + instrument was already previously called without uninstrument being called in between + the instrument calls. + + :type enable_content_recording: bool, optional + """ - self._impl.instrument() + self._impl.instrument(enable_content_recording) def uninstrument(self) -> None: """ @@ -96,6 +111,14 @@ def is_instrumented(self) -> bool: """ return self._impl.is_instrumented() + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content recording is enabled. + :rtype: bool + """ + return self._impl.is_content_recording_enabled() + class _AIAgentsInstrumentorPreview: """ @@ -110,18 +133,27 @@ def _str_to_bool(self, s): return False return str(s).lower() == "true" - def instrument(self): + def instrument(self, enable_content_recording: bool = None): """ Enable trace instrumentation for AI Agents. - This method checks the environment variable - 'AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED' to determine - whether to enable content tracing. + :param enable_content_recording: Whether content recording is enabled as part + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. + + :type enable_content_recording: bool, optional """ - if not self.is_instrumented(): + if enable_content_recording is None: var_value = os.environ.get("AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED") - enable_content_tracing = self._str_to_bool(var_value) - self._instrument_agents(enable_content_tracing) + enable_content_recording = self._str_to_bool(var_value) + if not self.is_instrumented(): + self._instrument_agents(enable_content_recording) + else: + self._set_enable_content_recording(enable_content_recording=enable_content_recording) def uninstrument(self): """ @@ -142,6 +174,24 @@ def is_instrumented(self): """ return self._is_instrumented() + def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_recording: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_recording: bool + """ + self._set_enable_content_recording(enable_content_recording=enable_content_recording) + + def is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content recording is enabled. 
+ :rtype bool + """ + return self._is_content_recording_enabled() + def _set_attributes(self, span: "AbstractSpan", *attrs: Tuple[str, Any]) -> None: for attr in attrs: key, value = attr @@ -1298,6 +1348,25 @@ def _is_instrumented(self): """ return _agents_traces_enabled + def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: + """This function sets the content recording value. + + :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + This also controls whether function call tool function names, + parameter names and parameter values are traced. + :type enable_content_tracing: bool + """ + global _trace_agents_content + _trace_agents_content = enable_content_recording + + def _is_content_recording_enabled(self) -> bool: + """This function gets the content recording value. + + :return: A bool value indicating whether content tracing is enabled. + :rtype bool + """ + return _trace_agents_content + class _AgentEventHandlerTraceWrapper(AgentEventHandler): def __init__(self, inner_handler: AgentEventHandler, instrumentor: AIAgentsInstrumentor, span: "AbstractSpan"): From 139ca5d6de90f558e25b88391afd08bd835333a8 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Mon, 4 Nov 2024 12:13:50 -0800 Subject: [PATCH 075/138] Howie/save file test (#38306) * Added test for save_file * resolved comment --- .../tests/agents/test_agents_client.py | 107 +++++++++++------- 1 file changed, 65 insertions(+), 42 deletions(-) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index fb10f2f007f6..ffd39a474a6f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -6,6 +6,7 @@ # cSpell:disable import os import json +import tempfile import time import functools import datetime @@ -13,8 +14,7 @@ import sys from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet -from azure.ai.projects.models import CodeInterpreterToolResource, FileSearchToolResource, ToolResources +from azure.ai.projects.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet, CodeInterpreterToolResource, FileSearchToolResource, ToolResources, OpenAIFile, FilePurpose from azure.core.pipeline.transport import RequestsTransport from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError @@ -1148,47 +1148,70 @@ def test_create_agent_with_invalid_file_search_tool_resource(self, **kwargs): assert exception_message == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" - # # ********************************************************************************** - # # - # # HAPPY PATH SERVICE TESTS - Streaming APIs - # # - # # ********************************************************************************** - - # # ********************************************************************************** - # # - # # NEGATIVE TESTS - TODO idk what goes here - # # - # # ********************************************************************************** - """ - # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors - # test agent creation and deletion @agentClientPreparer() @recorded_by_proxy - def test_negative_create_delete_agent(self, **kwargs): - 
-        # create client using bad endpoint
-        bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
+    def test_code_interpreter_and_save_file(self, **kwargs):
+        output_file_exist = False
+
+        # create client
+        with self.create_client(**kwargs) as client:
+            file: OpenAIFile = None
+
+            with tempfile.TemporaryDirectory() as temp_dir:
+
+                # create a temporary input file for upload
+                test_file_path = os.path.join(temp_dir, "input.txt")
+
+                with open(test_file_path, "w") as f:
+                    f.write("This is a test file")
+
+                file = client.agents.upload_file_and_poll(file_path=test_file_path, purpose=FilePurpose.AGENTS)
+
+                # create agent
+                code_interpreter = CodeInterpreterTool(file_ids=[file.id])
+                agent = client.agents.create_agent(
+                    model="gpt-4-1106-preview",
+                    name="my-assistant",
+                    instructions="You are helpful assistant",
+                    tools=code_interpreter.definitions,
+                    tool_resources=code_interpreter.resources,
+                )
+                print(f"Created agent, agent ID: {agent.id}")
+
+                thread = client.agents.create_thread()
+                print(f"Created thread, thread ID: {thread.id}")
+
+                # create a message
+                message = client.agents.create_message(
+                    thread_id=thread.id,
+                    role="user",
+                    content="Create an image file same as the text file and give me file id?",
+                )
+                print(f"Created message, message ID: {message.id}")
+
+                # create run
+                run = client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+                print(f"Run finished with status: {run.status}")
+
+                # delete file
+                client.agents.delete_file(file.id)
+                print("Deleted file")
+
+                # get messages
+                messages = client.agents.get_messages(thread_id=thread.id)
+                print(f"Messages: {messages}")
+
+                last_msg = messages.get_last_text_message_by_sender("assistant")
+                if last_msg:
+                    print(f"Last Message: {last_msg.text.value}")
+
+                for file_path_annotation in messages.file_path_annotations:
+                    file_id = file_path_annotation.file_path.file_id
+                    print(f"Image File ID: {file_path_annotation.file_path.file_id}")
+                    temp_file_path = os.path.join(temp_dir, "output.png")
+                    client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir)
+                    output_file_exist = os.path.exists(temp_file_path)
+
+        assert output_file_exist
 
-        credential = self.get_credential(AIProjectClient, is_async=False)
-        client = AIProjectClient.from_connection_string(
-            credential=credential,
-            connection=bad_connection_string,
-        )
-
-        # attempt to create agent with bad client
-        exception_caught = False
-        try:
-            agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
-        # check for error (will not have a status code since it failed on request -- no response was received)
-        except (ServiceRequestError, HttpResponseError) as e:
-            exception_caught = True
-            if type(e) == ServiceRequestError:
-                assert e.message
-                assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
-            else:
-                assert "No such host is known" and "foo.bar.some-domain.ms" in str(e)
-
-        # close client and confirm an exception was caught
-        client.close()
-        assert exception_caught
-    """

From 904c9efe891485ae09dc551c6d934676492cd2d5 Mon Sep 17 00:00:00 2001
From: nick863 <30440255+nick863@users.noreply.github.com>
Date: Mon, 4 Nov 2024 12:38:42 -0800
Subject: [PATCH 076/138] Mypy fixes for instrumentor (#38302)

* Fixes

* Regenerate code

* Merge again

* Linter fixes

* Regenerate the code

* Fix mypy

* More mypy fixes

* Fix
---
 .../azure/ai/projects/_types.py               |   1 -
.../ai/projects/aio/operations/_operations.py | 2 +- .../ai/projects/aio/operations/_patch.py | 63 +-- .../azure/ai/projects/models/__init__.py | 8 +- .../azure/ai/projects/models/_enums.py | 4 +- .../azure/ai/projects/models/_models.py | 223 +++++----- .../azure/ai/projects/models/_patch.py | 70 +-- .../ai/projects/operations/_operations.py | 2 +- .../azure/ai/projects/operations/_patch.py | 48 ++- .../agents/_ai_agents_instrumentor.py | 403 +++++++++++------- .../ai/projects/telemetry/agents/_utils.py | 59 +-- ...basics_async_with_azure_monitor_tracing.py | 1 + ...gents_basics_async_with_console_tracing.py | 1 + .../sample_agents_code_interpreter_async.py | 4 +- .../sample_agents_run_with_toolset_async.py | 2 +- ...gents_basics_with_azure_monitor_tracing.py | 4 +- ...mple_agents_basics_with_console_tracing.py | 4 +- ...ts_functions_with_azure_monitor_tracing.py | 23 +- ...e_agents_functions_with_console_tracing.py | 23 +- ...eventhandler_with_azure_monitor_tracing.py | 7 +- ...tream_eventhandler_with_console_tracing.py | 7 +- .../samples/agents/user_functions.py | 16 +- .../async_samples/sample_connections_async.py | 4 +- .../samples/connections/sample_connections.py | 6 +- ...ns_with_azure_ai_inference_client_async.py | 4 +- ...gs_with_azure_ai_inference_client_async.py | 4 +- ...pletions_with_azure_ai_inference_client.py | 4 +- ...erence_client_and_azure_monitor_tracing.py | 4 +- ...ai_inference_client_and_console_tracing.py | 4 +- .../tests/agents/test_agents_client.py | 36 +- .../tests/connections/test_connections.py | 26 +- .../connections/test_connections_async.py | 32 +- .../test_connections_unit_tests.py | 4 +- .../tests/inference/test_inference.py | 6 +- .../tests/inference/test_inference_async.py | 2 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 36 files changed, 634 insertions(+), 479 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py index c438829bda41..b540a961b2f1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py @@ -10,7 +10,6 @@ if TYPE_CHECKING: from . import models as _models - from .. import models as _models AgentsApiResponseFormatOption = Union[ str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" ] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index cc99f2615f83..419f86e97145 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -5042,7 +5042,7 @@ async def _list_connections( """List the details of all the connections (not including their credentials). :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". Default value is None. :paramtype category: str or ~azure.ai.projects.models.ConnectionType :keyword include_all: Indicates whether to list datastores. Service default: do not list datastores. Default value is None. 
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 388383c20eff..58220a597f31 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -57,9 +56,9 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" """ kwargs.setdefault("merge_span", True) - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if use_serverless_connection: connection = await self._outer_instance.connections.get_default( @@ -92,17 +91,13 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint=endpoint, credential=AzureKeyCredential(connection.key) - ) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" ) - client = ChatCompletionsClient( - endpoint=endpoint, credential=connection.properties.token_credential - ) + client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( @@ -126,9 +121,9 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": """ kwargs.setdefault("merge_span", True) - # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on + # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if use_serverless_connection: connection = await self._outer_instance.connections.get_default( @@ -167,9 +162,7 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) - client = EmbeddingsClient( - endpoint=endpoint, credential=connection.properties.token_credential - ) + client = EmbeddingsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. 
logger.debug( @@ -321,7 +314,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k return ConnectionProperties(connection=connection) else: try: - connection=await self._get_connection(connection_name=connection_name, **kwargs) + connection = await self._get_connection(connection_name=connection_name, **kwargs) except ResourceNotFoundError as _: return None return ConnectionProperties(connection=connection) @@ -615,7 +608,7 @@ async def create_agent( metadata=metadata, **kwargs, ) - + @overload async def update_agent( self, assistant_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any @@ -650,7 +643,7 @@ async def update_agent( top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. @@ -701,7 +694,7 @@ async def update_agent( :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - + @overload async def update_agent( self, @@ -717,7 +710,7 @@ async def update_agent( top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. @@ -763,7 +756,6 @@ async def update_agent( :rtype: ~azure.ai.projects.models.Agent :raises ~azure.core.exceptions.HttpResponseError: """ - @overload async def update_agent( @@ -795,13 +787,13 @@ async def update_agent( instructions: Optional[str] = None, tools: Optional[List[_models.ToolDefinition]] = None, tool_resources: Optional[_models.ToolResources] = None, - toolset: Optional[_models.AsyncToolSet] = None, + toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. 
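
The async update_agent overloads above pair `tools` with `tool_resources`, and the `_validate_tools_and_tool_resources` helper later in this patch rejects mismatched pairs. A minimal sketch of a call that always satisfies that check, assuming an AIProjectClient constructed elsewhere; the helper name `attach_code_interpreter` and the agent/file IDs are illustrative placeholders, not part of the SDK:

    from typing import TYPE_CHECKING

    from azure.ai.projects.models import CodeInterpreterTool

    if TYPE_CHECKING:
        from azure.ai.projects.aio import AIProjectClient


    async def attach_code_interpreter(client: "AIProjectClient", agent_id: str, file_id: str):
        # CodeInterpreterTool derives both `definitions` and `resources` from the
        # same file IDs, so the two arguments can never fall out of sync and the
        # ValueError path in _validate_tools_and_tool_resources is never hit.
        code_interpreter = CodeInterpreterTool(file_ids=[file_id])
        return await client.agents.update_agent(
            assistant_id=agent_id,
            tools=code_interpreter.definitions,
            tool_resources=code_interpreter.resources,
        )
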
@@ -855,7 +847,7 @@ async def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ self._validate_tools_and_tool_resources(tools, tool_resources) - + if body is not _Unset: if isinstance(body, io.IOBase): return await super().update_agent(body=body, content_type=content_type, **kwargs) @@ -880,18 +872,27 @@ async def update_agent( metadata=metadata, **kwargs, ) - - def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]): + + def _validate_tools_and_tool_resources( + self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources] + ): if tool_resources is None: return if tools is None: tools = [] - - if tool_resources.file_search is not None and not any(isinstance(tool, _models.FileSearchToolDefinition) for tool in tools): - raise ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided") - if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools): - raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided") - + + if tool_resources.file_search is not None and not any( + isinstance(tool, _models.FileSearchToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) + if tool_resources.code_interpreter is not None and not any( + isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools + ): + raise ValueError( + "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) def _get_toolset(self) -> Optional[_models.AsyncToolSet]: """ diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py index 0faab636008c..353ea686df0a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py @@ -26,8 +26,6 @@ BingGroundingToolDefinition, CodeInterpreterToolDefinition, CodeInterpreterToolResource, - ConnectionListResource, - ConnectionResource, CronTrigger, Dataset, Evaluation, @@ -128,6 +126,8 @@ ThreadMessage, ThreadMessageOptions, ThreadRun, + ToolConnection, + ToolConnectionList, ToolDefinition, ToolOutput, ToolResources, @@ -207,8 +207,6 @@ "BingGroundingToolDefinition", "CodeInterpreterToolDefinition", "CodeInterpreterToolResource", - "ConnectionListResource", - "ConnectionResource", "CronTrigger", "Dataset", "Evaluation", @@ -309,6 +307,8 @@ "ThreadMessage", "ThreadMessageOptions", "ThreadRun", + "ToolConnection", + "ToolConnectionList", "ToolDefinition", "ToolOutput", "ToolResources", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index afd4db01c895..4e9ce22d8c67 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -41,8 +41,8 @@ class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tool type ``bing_grounding``""" MICROSOFT_FABRIC = "microsoft_fabric" """Tool type ``microsoft_fabric``""" - SHAREPOINT = "sharepoint" - """Tool type ``sharepoint``""" + SHAREPOINT = "sharepoint_grounding" + """Tool type ``sharepoint_grounding``""" AZURE_AI_SEARCH = "azure_ai_search" 
"""Tool type ``azure_ai_search``""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 0f37a301d0fe..56eb498f3e62 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -223,7 +223,7 @@ class AgentsNamedToolChoice(_model_base.Model): :ivar type: the type of tool. If type is ``function``\\ , the function name must be set. Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", - "microsoft_fabric", "sharepoint", and "azure_ai_search". + "microsoft_fabric", "sharepoint_grounding", and "azure_ai_search". :vartype type: str or ~azure.ai.projects.models.AgentsNamedToolChoiceType :ivar function: The name of the function to call. :vartype function: ~azure.ai.projects.models.FunctionName @@ -232,7 +232,7 @@ class AgentsNamedToolChoice(_model_base.Model): type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() """the type of tool. If type is ``function``\ , the function name must be set. Required. Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", - \"microsoft_fabric\", \"sharepoint\", and \"azure_ai_search\".""" + \"microsoft_fabric\", \"sharepoint_grounding\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() """The name of the function to call.""" @@ -567,15 +567,21 @@ class BingGroundingToolDefinition(ToolDefinition, discriminator="bing_grounding" :ivar type: The object type, which is always 'bing_grounding'. Required. Default value is "bing_grounding". :vartype type: str + :ivar bing_grounding: The list of connections used by the bing grounding tool. Required. + :vartype bing_grounding: ~azure.ai.projects.models.ToolConnectionList """ type: Literal["bing_grounding"] = rest_discriminator(name="type") # type: ignore """The object type, which is always 'bing_grounding'. Required. Default value is \"bing_grounding\".""" + bing_grounding: "_models.ToolConnectionList" = rest_field() + """The list of connections used by the bing grounding tool. Required.""" @overload def __init__( self, + *, + bing_grounding: "_models.ToolConnectionList", ) -> None: ... @overload @@ -650,67 +656,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ConnectionListResource(_model_base.Model): - """A set of connection resources currently used by either the ``bing_grounding``\\ , - ``microsoft_fabric``\\ , or ``sharepoint`` tools. - - :ivar connection_list: The connections attached to this agent. There can be a maximum of 1 - connection - resource attached to the agent. - :vartype connection_list: list[~azure.ai.projects.models.ConnectionResource] - """ - - connection_list: Optional[List["_models.ConnectionResource"]] = rest_field(name="connections") - """The connections attached to this agent. There can be a maximum of 1 connection - resource attached to the agent.""" - - @overload - def __init__( - self, - *, - connection_list: Optional[List["_models.ConnectionResource"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - -class ConnectionResource(_model_base.Model): - """A connection resource. 
- - - :ivar connection_id: A connection in a ConnectionListResource attached to this agent. Required. - :vartype connection_id: str - """ - - connection_id: str = rest_field() - """A connection in a ConnectionListResource attached to this agent. Required.""" - - @overload - def __init__( - self, - *, - connection_id: str, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - - class CredentialsApiKeyAuth(_model_base.Model): """The credentials needed for API key authentication. @@ -1431,7 +1376,7 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi authentication :vartype auth_type: str or ~azure.ai.projects.models.AAD :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str @@ -1441,7 +1386,7 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi """Authentication type of the connection target. Required. Entra ID authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" + \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" target: str = rest_field() """The connection URL to be used for this service. Required.""" @@ -1453,7 +1398,7 @@ class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discr :ivar auth_type: Authentication type of the connection target. Required. API Key authentication :vartype auth_type: str or ~azure.ai.projects.models.API_KEY :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth @@ -1465,7 +1410,7 @@ class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discr """Authentication type of the connection target. Required. API Key authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" + \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() @@ -1480,7 +1425,7 @@ class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discrimi Signature (SAS) authentication :vartype auth_type: str or ~azure.ai.projects.models.SAS :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". 
:vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar credentials: Credentials will only be present for authType=ApiKey. Required. :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth @@ -1493,7 +1438,7 @@ class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discrimi authentication""" category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", and \"AIServices\".""" + \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" target: str = rest_field() @@ -2412,15 +2357,21 @@ class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fab :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is "microsoft_fabric". :vartype type: str + :ivar microsoft_fabric: The list of connections used by the Microsoft Fabric tool. Required. + :vartype microsoft_fabric: ~azure.ai.projects.models.ToolConnectionList """ type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore """The object type, which is always 'microsoft_fabric'. Required. Default value is \"microsoft_fabric\".""" + microsoft_fabric: "_models.ToolConnectionList" = rest_field() + """The list of connections used by the Microsoft Fabric tool. Required.""" @overload def __init__( self, + *, + microsoft_fabric: "_models.ToolConnectionList", ) -> None: ... @overload @@ -4528,7 +4479,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type="microsoft_fabric", **kwargs) -class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): +class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_grounding"): """A record of a call to a SharePoint tool, issued by the model in evaluation of a defined tool, that represents executed SharePoint actions. @@ -4537,16 +4488,17 @@ class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint"): :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. Required. :vartype id: str - :ivar type: The object type, which is always 'sharepoint'. Required. Default value is - "sharepoint". + :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is + "sharepoint_grounding". :vartype type: str :ivar share_point: Reserved for future use. Required. :vartype share_point: dict[str, str] """ - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" - share_point: Dict[str, str] = rest_field(name="sharepoint") + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint_grounding'. Required. Default value is + \"sharepoint_grounding\".""" + share_point: Dict[str, str] = rest_field(name="sharepoint_grounding") """Reserved for future use. 
Required.""" @overload @@ -4565,7 +4517,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint", **kwargs) + super().__init__(*args, type="sharepoint_grounding", **kwargs) class RunStepToolCallDetails(RunStepDetails, discriminator="tool_calls"): @@ -4603,21 +4555,28 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type=RunStepType.TOOL_CALLS, **kwargs) -class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint"): +class SharepointToolDefinition(ToolDefinition, discriminator="sharepoint_grounding"): """The input definition information for a sharepoint tool as used to configure an agent. - :ivar type: The object type, which is always 'sharepoint'. Required. Default value is - "sharepoint". + :ivar type: The object type, which is always 'sharepoint_grounding'. Required. Default value is + "sharepoint_grounding". :vartype type: str + :ivar sharepoint_grounding: The list of connections used by the SharePoint tool. Required. + :vartype sharepoint_grounding: ~azure.ai.projects.models.ToolConnectionList """ - type: Literal["sharepoint"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'sharepoint'. Required. Default value is \"sharepoint\".""" + type: Literal["sharepoint_grounding"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'sharepoint_grounding'. Required. Default value is + \"sharepoint_grounding\".""" + sharepoint_grounding: "_models.ToolConnectionList" = rest_field() + """The list of connections used by the SharePoint tool. Required.""" @overload def __init__( self, + *, + sharepoint_grounding: "_models.ToolConnectionList", ) -> None: ... @overload @@ -4628,7 +4587,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="sharepoint", **kwargs) + super().__init__(*args, type="sharepoint_grounding", **kwargs) class SubmitToolOutputsAction(RequiredAction, discriminator="submit_tool_outputs"): @@ -5148,6 +5107,67 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["thread.run"] = "thread.run" +class ToolConnection(_model_base.Model): + """A connection resource. + + + :ivar connection_id: A connection in a ToolConnectionList attached to this tool. Required. + :vartype connection_id: str + """ + + connection_id: str = rest_field() + """A connection in a ToolConnectionList attached to this tool. Required.""" + + @overload + def __init__( + self, + *, + connection_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ToolConnectionList(_model_base.Model): + """A set of connection resources currently used by either the ``bing_grounding``\\ , + ``microsoft_fabric``\\ , or ``sharepoint_grounding`` tools. + + :ivar connection_list: The connections attached to this tool. There can be a maximum of 1 + connection + resource attached to the tool. + :vartype connection_list: list[~azure.ai.projects.models.ToolConnection] + """ + + connection_list: Optional[List["_models.ToolConnection"]] = rest_field(name="connections") + """The connections attached to this tool. 
There can be a maximum of 1 connection + resource attached to the tool.""" + + @overload + def __init__( + self, + *, + connection_list: Optional[List["_models.ToolConnection"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class ToolOutput(_model_base.Model): """The data provided during a tool outputs submission to resolve pending tool calls and allow the model to continue. @@ -5197,15 +5217,6 @@ class ToolResources(_model_base.Model): :ivar file_search: Resources to be used by the ``file_search`` tool consisting of vector store IDs. :vartype file_search: ~azure.ai.projects.models.FileSearchToolResource - :ivar bing_grounding: Resources to be used by the ``bing_grounding`` tool consisting of - connection IDs. - :vartype bing_grounding: ~azure.ai.projects.models.ConnectionListResource - :ivar microsoft_fabric: Resources to be used by the ``microsoft_fabric`` tool consisting of - connection IDs. - :vartype microsoft_fabric: ~azure.ai.projects.models.ConnectionListResource - :ivar share_point: Resources to be used by the ``sharepoint`` tool consisting of connection - IDs. - :vartype share_point: ~azure.ai.projects.models.ConnectionListResource :ivar azure_ai_search: Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names. :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource @@ -5215,12 +5226,6 @@ class ToolResources(_model_base.Model): """Resources to be used by the ``code_interpreter tool`` consisting of file IDs.""" file_search: Optional["_models.FileSearchToolResource"] = rest_field() """Resources to be used by the ``file_search`` tool consisting of vector store IDs.""" - bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() - """Resources to be used by the ``bing_grounding`` tool consisting of connection IDs.""" - microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() - """Resources to be used by the ``microsoft_fabric`` tool consisting of connection IDs.""" - share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") - """Resources to be used by the ``sharepoint`` tool consisting of connection IDs.""" azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() """Resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" @@ -5230,9 +5235,6 @@ def __init__( *, code_interpreter: Optional["_models.CodeInterpreterToolResource"] = None, file_search: Optional["_models.FileSearchToolResource"] = None, - bing_grounding: Optional["_models.ConnectionListResource"] = None, - microsoft_fabric: Optional["_models.ConnectionListResource"] = None, - share_point: Optional["_models.ConnectionListResource"] = None, azure_ai_search: Optional["_models.AzureAISearchResource"] = None, ) -> None: ... @@ -5363,15 +5365,6 @@ class UpdateToolResourcesOptions(_model_base.Model): :ivar file_search: Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent. :vartype file_search: ~azure.ai.projects.models.UpdateFileSearchToolResourceOptions - :ivar bing_grounding: Overrides the list of connections to be used by the ``bing_grounding`` - tool consisting of connection IDs. 
- :vartype bing_grounding: ~azure.ai.projects.models.ConnectionListResource - :ivar microsoft_fabric: Overrides the list of connections to be used by the - ``microsoft_fabric`` tool consisting of connection IDs. - :vartype microsoft_fabric: ~azure.ai.projects.models.ConnectionListResource - :ivar share_point: Overrides the list of connections to be used by the ``sharepoint`` tool - consisting of connection IDs. - :vartype share_point: ~azure.ai.projects.models.ConnectionListResource :ivar azure_ai_search: Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names. :vartype azure_ai_search: ~azure.ai.projects.models.AzureAISearchResource @@ -5384,15 +5377,6 @@ class UpdateToolResourcesOptions(_model_base.Model): file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = rest_field() """Overrides the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" - bing_grounding: Optional["_models.ConnectionListResource"] = rest_field() - """Overrides the list of connections to be used by the ``bing_grounding`` tool consisting of - connection IDs.""" - microsoft_fabric: Optional["_models.ConnectionListResource"] = rest_field() - """Overrides the list of connections to be used by the ``microsoft_fabric`` tool consisting of - connection IDs.""" - share_point: Optional["_models.ConnectionListResource"] = rest_field(name="sharepoint") - """Overrides the list of connections to be used by the ``sharepoint`` tool consisting of - connection IDs.""" azure_ai_search: Optional["_models.AzureAISearchResource"] = rest_field() """Overrides the resources to be used by the ``azure_ai_search`` tool consisting of index IDs and names.""" @@ -5403,9 +5387,6 @@ def __init__( *, code_interpreter: Optional["_models.UpdateCodeInterpreterToolResourceOptions"] = None, file_search: Optional["_models.UpdateFileSearchToolResourceOptions"] = None, - bing_grounding: Optional["_models.ConnectionListResource"] = None, - microsoft_fabric: Optional["_models.ConnectionListResource"] = None, - share_point: Optional["_models.ConnectionListResource"] = None, azure_ai_search: Optional["_models.AzureAISearchResource"] = None, ) -> None: ... diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index cebbfd8a1b03..e356228ecf7b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,6 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
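
The model changes above rename ConnectionResource/ConnectionListResource to ToolConnection/ToolConnectionList and move the connection list off ToolResources onto the tool definitions themselves. A minimal sketch of building a Bing grounding tool under the new shape; the connection ID is a placeholder:

    from azure.ai.projects.models import (
        BingGroundingToolDefinition,
        ToolConnection,
        ToolConnectionList,
    )

    # At most one connection resource may be attached to the tool.
    connections = ToolConnectionList(
        connection_list=[ToolConnection(connection_id="<bing-connection-id>")]
    )

    # The connection list now rides on the tool definition itself rather than
    # on ToolResources, which drops its bing_grounding, microsoft_fabric, and
    # share_point fields in this change.
    bing_tool = BingGroundingToolDefinition(bing_grounding=connections)
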
@@ -13,6 +11,7 @@ import inspect import json import logging +import math import base64 import asyncio import re @@ -59,6 +58,7 @@ Tuple, Set, get_origin, + cast, get_args, Union, ) @@ -120,7 +120,9 @@ class ConnectionProperties: :vartype token_credential: ~azure.core.credentials.TokenCredential """ - def __init__(self, *, connection: GetConnectionResponse, token_credential: TokenCredential = None) -> None: + def __init__( + self, *, connection: GetConnectionResponse, token_credential: Optional[TokenCredential] = None + ) -> None: self.id = connection.id self.name = connection.name self.authentication_type = connection.properties.auth_type @@ -130,7 +132,7 @@ def __init__(self, *, connection: GetConnectionResponse, token_credential: Token if connection.properties.target.endswith("/") else connection.properties.target ) - self.key: str = None + self.key: Optional[str] = None if hasattr(connection.properties, "credentials"): if hasattr(connection.properties.credentials, "key"): self.key = connection.properties.credentials.key @@ -198,7 +200,7 @@ def __init__( logger.debug("[SASTokenCredential.__init__] Exit. Given token expires on %s.", self._expires_on) @classmethod - def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime: + def _get_expiration_date_from_token(cls, jwt_token: str) -> datetime.datetime: payload = jwt_token.split(".")[1] padded_payload = payload + "=" * (4 - len(payload) % 4) # Add padding if necessary decoded_bytes = base64.urlsafe_b64decode(padded_payload) @@ -225,11 +227,31 @@ def _refresh_token(self) -> None: self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) - def get_token(self) -> AccessToken: + def get_token( + self, + *scopes: str, + claims: Optional[str] = None, + tenant_id: Optional[str] = None, + enable_cae: bool = False, + **kwargs: Any, + ) -> AccessToken: + """Request an access token for `scopes`. + + :param str scopes: The type of access needed. + + :keyword str claims: Additional claims required in the token, such as those returned in a resource + provider's claims challenge following an authorization failure. + :keyword str tenant_id: Optional tenant to include in the token request. + :keyword bool enable_cae: Indicates whether to enable Continuous Access Evaluation (CAE) for the requested + token. Defaults to False. + + :rtype: AccessToken + :return: An AccessToken instance containing the token string and its expiration time in Unix time. 
+ """ logger.debug("SASTokenCredential.get_token] Enter") if self._expires_on < datetime.datetime.now(datetime.timezone.utc): self._refresh_token() - return AccessToken(self._sas_token, self._expires_on.timestamp()) + return AccessToken(self._sas_token, math.floor(self._expires_on.timestamp())) # Define type_map to translate Python type annotations to JSON Schema types @@ -253,10 +275,7 @@ def _map_type(annotation) -> Dict[str, Any]: if origin in {list, List}: args = get_args(annotation) item_type = args[0] if args else str - return { - "type": "array", - "items": _map_type(item_type) - } + return {"type": "array", "items": _map_type(item_type)} elif origin in {dict, Dict}: return {"type": "object"} elif origin is Union: @@ -337,8 +356,8 @@ def __init__(self, functions: Set[Callable[..., Any]]): def _create_function_dict(self, functions: Set[Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: return {func.__name__: func for func in functions} - def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDefinition]: - specs = [] + def _build_function_definitions(self, functions: Dict[str, Any]) -> List[FunctionToolDefinition]: + specs: List[FunctionToolDefinition] = [] # Flexible regex to capture ':param : ' param_pattern = re.compile( r""" @@ -350,7 +369,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef \s*:\s* # Colon ':' surrounded by optional whitespace (?P.+) # Description (rest of the line) """, - re.VERBOSE + re.VERBOSE, ) for name, func in functions.items(): @@ -365,8 +384,8 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef match = param_pattern.match(line) if match: groups = match.groupdict() - param_name = groups.get('name') - param_desc = groups.get('description') + param_name = groups.get("name") + param_desc = groups.get("description") param_desc = param_desc.strip() if param_desc else "No description" param_descs[param_name] = param_desc.strip() @@ -376,10 +395,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef param_type_info = _map_type(param.annotation) param_description = param_descs.get(param_name, "No description") - properties[param_name] = { - **param_type_info, - "description": param_description - } + properties[param_name] = {**param_type_info, "description": param_description} # If the parameter has no default value and is not optional, add it to the required list if param.default is inspect.Parameter.empty and not is_optional(param.annotation): @@ -388,11 +404,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[ToolDef function_def = FunctionDefinition( name=name, description=description, - parameters={ - "type": "object", - "properties": properties, - "required": required - }, + parameters={"type": "object", "properties": properties, "required": required}, ) tool_def = FunctionToolDefinition(function=function_def) specs.append(tool_def) @@ -428,7 +440,7 @@ def definitions(self) -> List[ToolDefinition]: :return: A list of function definitions. """ - return self._definitions + return cast(List[ToolDefinition], self._definitions) @property def resources(self) -> ToolResources: @@ -580,12 +592,12 @@ class BaseToolSet: Abstract class for a collection of tools that can be used by an agent. """ - def __init__(self): + def __init__(self) -> None: self._tools: List[Tool] = [] def validate_tool_type(self, tool: Tool) -> None: pass - + def add(self, tool: Tool): """ Add a tool to the tool set. 
@@ -628,7 +640,7 @@ def resources(self) -> ToolResources: """ Get the resources for all tools in the tool set. """ - tool_resources = {} + tool_resources: Dict[str, Any] = {} for tool in self._tools: resources = tool.resources for key, value in resources.items(): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index b766745c08ba..d0cb49c291b1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -6436,7 +6436,7 @@ def _list_connections( """List the details of all the connections (not including their credentials). :keyword category: Category of the workspace connection. Known values are: "AzureOpenAI", - "Serverless", "AzureBlob", and "AIServices". Default value is None. + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". Default value is None. :paramtype category: str or ~azure.ai.projects.models.ConnectionType :keyword include_all: Indicates whether to list datastores. Service default: do not list datastores. Default value is None. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 98540cafc8a4..0f4c10b14c2f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -65,7 +65,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. - use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if use_serverless_connection: connection = self._outer_instance.connections.get_default( @@ -98,17 +98,13 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": ) from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint=endpoint, credential=AzureKeyCredential(connection.key) - ) + client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) elif connection.authentication_type == AuthenticationType.AAD: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" ) - client = ChatCompletionsClient( - endpoint=endpoint, credential=connection.properties.token_credential - ) + client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( @@ -134,7 +130,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": # Back-door way to access the old behavior where each AI model (non-OpenAI) was hosted on # a separate "Serverless" connection. This is now deprecated. 
- use_serverless_connection : bool = (os.getenv("USE_SERVERLESS_CONNECTION", None) == "true") + use_serverless_connection: bool = os.getenv("USE_SERVERLESS_CONNECTION", None) == "true" if use_serverless_connection: connection = self._outer_instance.connections.get_default( @@ -173,9 +169,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" ) - client = EmbeddingsClient( - endpoint=endpoint, credential=connection.properties.token_credential - ) + client = EmbeddingsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: # TODO - Not yet supported by the service. Expected 9/27. logger.debug( @@ -406,6 +400,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> # Silently try to load a set of relevant Instrumentors try: from azure.core.settings import settings + settings.tracing_implementation = "opentelemetry" _ = settings.tracing_implementation() except ModuleNotFoundError as _: @@ -431,9 +426,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> if not instrumentor.is_instrumented(): instrumentor.instrument() except Exception as exc: - logger.warning( - "Could not call `AIAgentsInstrumentor().instrument()` " + str(exc) - ) + logger.warning("Could not call `AIAgentsInstrumentor().instrument()` " + str(exc)) try: from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor @@ -770,7 +763,7 @@ def update_agent( top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. @@ -837,7 +830,7 @@ def update_agent( top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. @@ -884,7 +877,6 @@ def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ - @overload def update_agent( self, assistant_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any @@ -921,7 +913,7 @@ def update_agent( response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, content_type: str = "application/json", metadata: Optional[Dict[str, str]] = None, - **kwargs: Any + **kwargs: Any, ) -> _models.Agent: """Modifies an existing agent. 
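
Both inference helpers honor the same deprecated back door shown above: setting USE_SERVERLESS_CONNECTION to the exact string "true" routes client creation through a per-model "Serverless" connection lookup instead of the default connection. A minimal sketch, assuming the operations group is exposed on the project client as `.inference` and that `project_client` was constructed elsewhere:

    import os

    # Any value other than the exact string "true" (including unset) keeps the
    # new default-connection behavior.
    os.environ["USE_SERVERLESS_CONNECTION"] = "true"

    # `project_client` is an already-constructed AIProjectClient.
    chat_client = project_client.inference.get_chat_completions_client()
    embeddings_client = project_client.inference.get_embeddings_client()
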
@@ -1001,16 +993,26 @@ def update_agent(
             **kwargs,
         )
 
-    def _validate_tools_and_tool_resources(self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]):
+    def _validate_tools_and_tool_resources(
+        self, tools: Optional[List[_models.ToolDefinition]], tool_resources: Optional[_models.ToolResources]
+    ):
         if tool_resources is None:
             return
 
         if tools is None:
             tools = []
 
-        if tool_resources.file_search is not None and not any(isinstance(tool, _models.FileSearchToolDefinition) for tool in tools):
-            raise ValueError("Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided")
-        if tool_resources.code_interpreter is not None and not any(isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools):
-            raise ValueError("Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided")
+        if tool_resources.file_search is not None and not any(
+            isinstance(tool, _models.FileSearchToolDefinition) for tool in tools
+        ):
+            raise ValueError(
+                "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided"
+            )
+        if tool_resources.code_interpreter is not None and not any(
+            isinstance(tool, _models.CodeInterpreterToolDefinition) for tool in tools
+        ):
+            raise ValueError(
+                "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided"
+            )
 
     def _get_toolset(self) -> Optional[_models.ToolSet]:
         """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
index b38b72c85c76..85e5b08f840f 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
@@ -1,3 +1,4 @@
+# pylint: disable=too-many-lines
 # ------------------------------------
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
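
The instrumentor hunks that follow thread the new Optional types through the tracing helpers. For context, a minimal sketch of how the recording toggles added in this patch series fit together, assuming AIAgentsInstrumentor is re-exported from the azure.ai.projects.telemetry.agents package; the leading-underscore methods are the private hooks shown earlier, not public API:

    from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor

    instrumentor = AIAgentsInstrumentor()
    if not instrumentor.is_instrumented():
        instrumentor.instrument()

    # Flips the module-level _trace_agents_content flag that gates whether
    # message bodies and function-call arguments are copied into span events.
    instrumentor._set_enable_content_recording(enable_content_recording=True)
    assert instrumentor._is_content_recording_enabled()
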
@@ -10,9 +13,9 @@ import logging import os from azure.ai.projects import _types -from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union, cast from urllib.parse import urlparse -from azure.ai.projects.telemetry.agents._utils import * # pylint: disable=unused-wildcard-import +from azure.ai.projects.telemetry.agents._utils import * # pylint: disable=unused-wildcard-import # pylint: disable = no-name-in-module from azure.core import CaseInsensitiveEnumMeta # type: ignore @@ -21,7 +24,22 @@ from azure.ai.projects.aio.operations import AgentsOperations as AsyncAgentOperations from azure.ai.projects.models import _models, AgentRunStream from azure.ai.projects.models._enums import MessageRole, RunStepStatus -from azure.ai.projects.models._models import MessageAttachment, MessageDeltaChunk, RunStep, RunStepDeltaChunk, RunStepFunctionToolCall, RunStepToolCallDetails, SubmitToolOutputsAction, ThreadMessage, ThreadMessageOptions, ThreadRun, ToolDefinition, ToolOutput, ToolResources +from azure.ai.projects.models._models import ( + MessageAttachment, + MessageDeltaChunk, + MessageIncompleteDetails, + RunStep, + RunStepDeltaChunk, + RunStepFunctionToolCall, + RunStepToolCallDetails, + SubmitToolOutputsAction, + ThreadMessage, + ThreadMessageOptions, + ThreadRun, + ToolDefinition, + ToolOutput, + ToolResources, +) from azure.ai.projects.models._patch import AgentEventHandler, ToolSet _Unset: Any = object() @@ -218,14 +236,14 @@ def _remove_function_call_names_and_arguments(self, tool_calls: list) -> list: def _create_event_attributes( self, - thread_id: str = None, - agent_id: str = None, - thread_run_id: str = None, - message_id: str = None, - message_status: str = None, - usage: Optional[_models.RunStepCompletionUsage] = None - ) -> dict: - attrs = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} + thread_id: Optional[str] = None, + agent_id: Optional[str] = None, + thread_run_id: Optional[str] = None, + message_id: Optional[str] = None, + message_status: Optional[str] = None, + usage: Optional[_models.RunStepCompletionUsage] = None, + ) -> Dict[str, Any]: + attrs: Dict[str, Any] = {GEN_AI_SYSTEM: AZ_AI_AGENT_SYSTEM} if thread_id: attrs[GEN_AI_THREAD_ID] = thread_id @@ -247,7 +265,9 @@ def _create_event_attributes( return attrs - def add_thread_message_event(self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None) -> None: + def add_thread_message_event( + self, span, message: ThreadMessage, usage: Optional[_models.RunStepCompletionUsage] = None + ) -> None: content_body = {} if _trace_agents_content: for content in message.content: @@ -270,21 +290,23 @@ def add_thread_message_event(self, span, message: ThreadMessage, usage: Optional thread_run_id=message.run_id, message_status=message.status, incomplete_details=message.incomplete_details, - usage=usage) + usage=usage, + ) def _add_message_event( self, span, role: str, content: Any, - attachments: Any = None, #Optional[List[MessageAttachment]] or dict + attachments: Any = None, # Optional[List[MessageAttachment]] or dict thread_id: Optional[str] = None, agent_id: Optional[str] = None, message_id: Optional[str] = None, thread_run_id: Optional[str] = None, message_status: Optional[str] = None, - incomplete_details: Optional[str] = None, - usage: Optional[_models.RunStepCompletionUsage] = None) -> None: + incomplete_details: Optional[MessageIncompleteDetails] = None, + usage: Optional[_models.RunStepCompletionUsage] = None, + ) -> 
None: # TODO document new fields event_body = {} @@ -308,7 +330,8 @@ def _add_message_event( thread_run_id=thread_run_id, message_id=message_id, message_status=message_status, - usage=usage) + usage=usage, + ) attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) span.span_instance.add_event(name=f"gen_ai.{role}.message", attributes=attributes) @@ -324,14 +347,15 @@ def _get_field(self, obj: Any, field: str) -> Any: def _add_instructions_event( self, span: "AbstractSpan", - instructions: str, - additional_instructions: str, + instructions: Optional[str], + additional_instructions: Optional[str], agent_id: Optional[str] = None, - thread_id: Optional[str] = None) -> None: + thread_id: Optional[str] = None, + ) -> None: if not instructions: return - event_body = {} + event_body: Dict[str, Any] = {} if _trace_agents_content and (instructions or additional_instructions): if instructions and additional_instructions: event_body["content"] = f"{instructions} {additional_instructions}" @@ -350,22 +374,29 @@ def _get_role(self, role: Union[str, MessageRole]) -> str: return role.value return role - + def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: # do we want a new event for it ? - tool_calls = [{"id": t.id, - "type": t.type, - "function" : { - "name": t.function.name, - "arguments": json.loads(t.function.arguments) - } if isinstance(t, RunStepFunctionToolCall) else None, - } for t in step.step_details.tool_calls] - - attributes = self._create_event_attributes(thread_id=step.thread_id, - agent_id=step.assistant_id, - thread_run_id=step.run_id, - message_status=step.status, - usage=step.usage) + tool_calls = [ + { + "id": t.id, + "type": t.type, + "function": ( + {"name": t.function.name, "arguments": json.loads(t.function.arguments)} + if isinstance(t, RunStepFunctionToolCall) + else None + ), + } + for t in cast(RunStepToolCallDetails, step.step_details).tool_calls + ] + + attributes = self._create_event_attributes( + thread_id=step.thread_id, + agent_id=step.assistant_id, + thread_run_id=step.run_id, + message_status=step.status, + usage=step.usage, + ) attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) span.span_instance.add_event(name=f"gen_ai.assistant.message", attributes=attributes) @@ -394,7 +425,7 @@ def start_thread_run_span( max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "AbstractSpan": + ) -> "Optional[AbstractSpan]": span = start_span( operation_name, project_name, @@ -405,9 +436,12 @@ def start_thread_run_span( top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format.value if response_format else None) - if span and span.span_instance.is_recording: - self._add_instructions_event(span, instructions, additional_instructions, thread_id=thread_id, agent_id=agent_id) + response_format=response_format.value if response_format else None, + ) + if span and span.span_instance.is_recording and instructions and additional_instructions: + self._add_instructions_event( + span, instructions, additional_instructions, thread_id=thread_id, agent_id=agent_id + ) if additional_messages: for message in additional_messages: @@ -421,21 +455,17 @@ def start_submit_tool_outputs_span( run_id: str, tool_outputs: List[ToolOutput] = _Unset, event_handler: Optional[AgentEventHandler] = None, - ) -> "AbstractSpan": + ) -> "Optional[AbstractSpan]": run_span = 
event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None recorded = self._add_tool_message_events(run_span, tool_outputs) - span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, - project_name, - thread_id=thread_id, - run_id=run_id) + span = start_span(OperationName.SUBMIT_TOOL_OUTPUTS, project_name, thread_id=thread_id, run_id=run_id) if not recorded: self._add_tool_message_events(span, tool_outputs) return span - - def _add_tool_message_events(self, span, tool_outputs: List[ToolOutput]) -> bool: + def _add_tool_message_events(self, span: "Optional[AbstractSpan]", tool_outputs: List[ToolOutput]) -> bool: if span and span.span_instance.is_recording: for tool_output in tool_outputs: body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} @@ -457,14 +487,16 @@ def start_create_agent_span( temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, - ) -> "AbstractSpan": - span = start_span(OperationName.CREATE_AGENT, - project_name, - span_name=f"{OperationName.CREATE_AGENT.value} {name}", - model=model, - temperature=temperature, - top_p=top_p, - response_format=response_format.value if response_format else None) + ) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.CREATE_AGENT, + project_name, + span_name=f"{OperationName.CREATE_AGENT.value} {name}", + model=model, + temperature=temperature, + top_p=top_p, + response_format=response_format.value if response_format else None, + ) if span and span.span_instance.is_recording: if name: span.add_attribute(GEN_AI_AGENT_NAME, name) @@ -477,9 +509,9 @@ def start_create_agent_span( def start_create_thread_span( self, project_name: str, - messages: Optional[List[ThreadMessageOptions]] = None, + messages: Optional[List[ThreadMessage]] = None, tool_resources: Optional[ToolResources] = None, - ) -> "AbstractSpan": + ) -> "Optional[AbstractSpan]": span = start_span(OperationName.CREATE_THREAD, project_name) if span and span.span_instance.is_recording: for message in messages or []: @@ -487,26 +519,22 @@ def start_create_thread_span( return span - def start_list_messages_span( - self, - project_name: str, - thread_id: str - ) -> "AbstractSpan": + def start_list_messages_span(self, project_name: str, thread_id: str) -> "Optional[AbstractSpan]": return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) - + def trace_create_agent(self, function, *args, **kwargs): project_name = args[0]._config.project_name - name=kwargs.get("name") - model=kwargs.get("model") - description=kwargs.get("description") - instructions=kwargs.get("instructions") - tools=kwargs.get("tools") - tool_resources=kwargs.get("tool_resources") - toolset=kwargs.get("toolset") - temperature=kwargs.get("temperature") - top_p=kwargs.get("top_p") - response_format=kwargs.get("response_format") - + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + response_format = kwargs.get("response_format") + with self.start_create_agent_span( project_name=project_name, name=name, @@ -518,7 +546,8 @@ def trace_create_agent(self, function, *args, **kwargs): toolset=toolset, temperature=temperature, top_p=top_p, - response_format=response_format) as span: + 
response_format=response_format, + ) as span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_AGENT_ID, result.id) @@ -539,17 +568,17 @@ def trace_create_agent(self, function, *args, **kwargs): async def trace_create_agent_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name - name=kwargs.get("name") - model=kwargs.get("model") - description=kwargs.get("description") - instructions=kwargs.get("instructions") - tools=kwargs.get("tools") - tool_resources=kwargs.get("tool_resources") - toolset=kwargs.get("toolset") - temperature=kwargs.get("temperature") - top_p=kwargs.get("top_p") - response_format=kwargs.get("response_format") - + name = kwargs.get("name") + model = kwargs.get("model") + description = kwargs.get("description") + instructions = kwargs.get("instructions") + tools = kwargs.get("tools") + tool_resources = kwargs.get("tool_resources") + toolset = kwargs.get("toolset") + temperature = kwargs.get("temperature") + top_p = kwargs.get("top_p") + response_format = kwargs.get("response_format") + with self.start_create_agent_span( project_name=project_name, name=name, @@ -561,7 +590,8 @@ async def trace_create_agent_async(self, function, *args, **kwargs): toolset=toolset, temperature=temperature, top_p=top_p, - response_format=response_format) as span: + response_format=response_format, + ) as span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_AGENT_ID, result.id) @@ -582,11 +612,9 @@ async def trace_create_agent_async(self, function, *args, **kwargs): def trace_create_thread(self, function, *args, **kwargs): project_name = args[0]._config.project_name - messages=kwargs.get("messages") - - with self.start_create_thread_span( - project_name=project_name, - messages=messages) as span: + messages = kwargs.get("messages") + + with self.start_create_thread_span(project_name=project_name, messages=messages) as span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) @@ -607,11 +635,9 @@ def trace_create_thread(self, function, *args, **kwargs): async def trace_create_thread_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name - messages=kwargs.get("messages") - - with self.start_create_thread_span( - project_name=project_name, - messages=messages) as span: + messages = kwargs.get("messages") + + with self.start_create_thread_span(project_name=project_name, messages=messages) as span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) @@ -635,15 +661,12 @@ def trace_create_message(self, function, *args, **kwargs): messages = kwargs.get("messages") thread_id = kwargs.get("thread_id") role = kwargs.get("role") - content= kwargs.get("content") + content = kwargs.get("content") attachments = kwargs.get("attachments") - + with self.start_create_message_span( - project_name=project_name, - thread_id=thread_id, - content=content, - role=role, - attachments=attachments) as span: + project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) as span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) @@ -667,15 +690,12 @@ async def trace_create_message_async(self, function, *args, **kwargs): messages = kwargs.get("messages") thread_id = kwargs.get("thread_id") role = kwargs.get("role") - content= kwargs.get("content") + content = kwargs.get("content") attachments = kwargs.get("attachments") - + with 
self.start_create_message_span( - project_name=project_name, - thread_id=thread_id, - content=content, - role=role, - attachments=attachments) as span: + project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments + ) as span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) @@ -723,7 +743,8 @@ def trace_create_run(self, operation_name, function, *args, **kwargs): top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format) as span: + response_format=response_format, + ) as span: try: result = function(*args, **kwargs) self.set_end_run(span, result) @@ -771,7 +792,8 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format) as span: + response_format=response_format, + ) as span: try: result = await function(*args, **kwargs) if span.span_instance.is_recording: @@ -779,7 +801,7 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs span.add_attribute(GEN_AI_RESPONSE_MODEL, result.model) if result.usage: result.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, result.usage.prompt_tokens) - result.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) + result.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, result.usage.completion_tokens) result.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) except Exception as exc: # Set the span status to error @@ -802,16 +824,17 @@ def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): run_id = kwargs.get("run_id") tool_outputs = kwargs.get("tool_outputs") event_handler = kwargs.get("event_handler") - + with self.start_submit_tool_outputs_span( project_name=project_name, thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, - event_handler=event_handler) as span: + event_handler=event_handler, + ) as span: try: if stream: - kwargs['event_handler'] = self.wrap_handler(event_handler, span) + kwargs["event_handler"] = self.wrap_handler(event_handler, span) result = function(*args, **kwargs) if not isinstance(result, AgentRunStream): @@ -837,16 +860,17 @@ async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwarg run_id = kwargs.get("run_id") tool_outputs = kwargs.get("tool_outputs") event_handler = kwargs.get("event_handler") - + with self.start_submit_tool_outputs_span( project_name=project_name, thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, - event_handler=event_handler) as span: + event_handler=event_handler, + ) as span: try: if stream: - kwargs['event_handler'] = self.wrap_handler(event_handler, span) + kwargs["event_handler"] = self.wrap_handler(event_handler, span) result = await function(*args, **kwargs) if not isinstance(result, AgentRunStream): @@ -943,13 +967,14 @@ def trace_create_stream(self, function, *args, **kwargs): top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format) - + response_format=response_format, + ) + # TODO: how to keep span active in the current context without existing? 
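        # (Editorial note: "existing" above is presumably a typo for "exiting"; the
        # difficulty is that the span must stay current while the caller consumes the
        # returned stream, yet this method returns before the stream is exhausted.)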
# TODO: dummy span for none with span.change_context(span.span_instance): try: - kwargs['event_handler'] = self.wrap_handler(event_handler, span) + kwargs["event_handler"] = self.wrap_handler(event_handler, span) result = function(*args, **kwargs) except Exception as exc: # Set the span status to error @@ -997,13 +1022,14 @@ async def trace_create_stream_async(self, function, *args, **kwargs): top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format) - + response_format=response_format, + ) + # TODO: how to keep span active in the current context without existing? # TODO: dummy span for none with span.change_context(span.span_instance): try: - kwargs['event_handler'] = self.wrap_handler(event_handler, span) + kwargs["event_handler"] = self.wrap_handler(event_handler, span) result = await function(*args, **kwargs) except Exception as exc: # Set the span status to error @@ -1024,9 +1050,7 @@ def trace_list_messages(self, function, *args, **kwargs): project_name = args[0]._config.project_name thread_id = kwargs.get("thread_id") - with self.start_list_messages_span( - project_name=project_name, - thread_id=thread_id) as span: + with self.start_list_messages_span(project_name=project_name, thread_id=thread_id) as span: try: result = function(*args, **kwargs) for message in result.data: @@ -1051,9 +1075,7 @@ async def trace_list_messages_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name thread_id = kwargs.get("thread_id") - with self.start_list_messages_span( - project_name=project_name, - thread_id=thread_id) as span: + with self.start_list_messages_span(project_name=project_name, thread_id=thread_id) as span: try: result = await function(*args, **kwargs) for message in result.data: @@ -1081,10 +1103,13 @@ def handle_run_stream_exit(self, function, *args, **kwargs): exc_tb = kwargs.get("exc_tb") # TODO: is it a good idea? 
# if not, we'll need to wrap stream and call exit - if agent_run_stream.event_handler and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper": + if ( + agent_run_stream.event_handler + and agent_run_stream.event_handler.__class__.__name__ == "_AgentEventHandlerTraceWrapper" + ): agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) - def wrap_handler(self, handler: "_models.AgentEventHandler", span: "AbstractSpan") -> "_models.AgentEventHandler": + def wrap_handler(self, handler: "AgentEventHandler", span: "AbstractSpan") -> "AgentEventHandler": if isinstance(handler, _AgentEventHandlerTraceWrapper): return handler @@ -1099,8 +1124,8 @@ def start_create_message_span( thread_id: str, content: str, role: Union[str, MessageRole] = _Unset, - attachments: Optional[List[MessageAttachment]] = None - ) -> "AbstractSpan": + attachments: Optional[List[MessageAttachment]] = None, + ) -> "Optional[AbstractSpan]": role_str = self._get_role(role) span = start_span(OperationName.CREATE_MESSAGE, project_name, thread_id=thread_id) if span and span.span_instance.is_recording: @@ -1205,7 +1230,7 @@ async def inner(*args, **kwargs): elif class_function_name.startswith("AgentsOperations.create_thread"): return await self.trace_create_thread_async(function, *args, **kwargs) elif class_function_name.startswith("AgentsOperations.create_message"): - return await self.trace_create_message_async(function,*args, **kwargs) + return await self.trace_create_message_async(function, *args, **kwargs) elif class_function_name.startswith("AgentsOperations.create_run"): return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) elif class_function_name.startswith("AgentsOperations.create_and_process_run"): @@ -1241,25 +1266,97 @@ def _agents_apis(self): ("azure.ai.projects.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "thread_create"), ("azure.ai.projects.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "message_create"), ("azure.ai.projects.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ("azure.ai.projects.operations", "AgentsOperations", "create_and_process_run", TraceType.AGENTS, "create_and_process_run"), - ("azure.ai.projects.operations", "AgentsOperations", "submit_tool_outputs_to_run", TraceType.AGENTS, "submit_tool_outputs_to_run"), - ("azure.ai.projects.operations", "AgentsOperations", "submit_tool_outputs_to_stream", TraceType.AGENTS, "submit_tool_outputs_to_stream"), - ("azure.ai.projects.operations", "AgentsOperations", "_handle_submit_tool_outputs", TraceType.AGENTS, "_handle_submit_tool_outputs"), + ( + "azure.ai.projects.operations", + "AgentsOperations", + "create_and_process_run", + TraceType.AGENTS, + "create_and_process_run", + ), + ( + "azure.ai.projects.operations", + "AgentsOperations", + "submit_tool_outputs_to_run", + TraceType.AGENTS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.projects.operations", + "AgentsOperations", + "submit_tool_outputs_to_stream", + TraceType.AGENTS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.projects.operations", + "AgentsOperations", + "_handle_submit_tool_outputs", + TraceType.AGENTS, + "_handle_submit_tool_outputs", + ), ("azure.ai.projects.operations", "AgentsOperations", "create_stream", TraceType.AGENTS, "create_stream"), ("azure.ai.projects.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), ("azure.ai.projects.models", "AgentRunStream", "__exit__", 
TraceType.AGENTS, "__exit__"), ) async_apis = ( ("azure.ai.projects.aio.operations", "AgentsOperations", "create_agent", TraceType.AGENTS, "agent_create"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_thread", TraceType.AGENTS, "agents_thread_create"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_message", TraceType.AGENTS, "agents_thread_message"), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "create_thread", + TraceType.AGENTS, + "agents_thread_create", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "create_message", + TraceType.AGENTS, + "agents_thread_message", + ), ("azure.ai.projects.aio.operations", "AgentsOperations", "create_run", TraceType.AGENTS, "create_run"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_and_process_run", TraceType.AGENTS, "create_and_process_run"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "submit_tool_outputs_to_run", TraceType.AGENTS, "submit_tool_outputs_to_run"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "submit_tool_outputs_to_stream", TraceType.AGENTS, "submit_tool_outputs_to_stream"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "_handle_submit_tool_outputs", TraceType.AGENTS, "_handle_submit_tool_outputs"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "create_stream", TraceType.AGENTS, "create_stream"), - ("azure.ai.projects.aio.operations", "AgentsOperations", "list_messages", TraceType.AGENTS, "list_messages"), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "create_and_process_run", + TraceType.AGENTS, + "create_and_process_run", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "submit_tool_outputs_to_run", + TraceType.AGENTS, + "submit_tool_outputs_to_run", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "submit_tool_outputs_to_stream", + TraceType.AGENTS, + "submit_tool_outputs_to_stream", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "_handle_submit_tool_outputs", + TraceType.AGENTS, + "_handle_submit_tool_outputs", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "create_stream", + TraceType.AGENTS, + "create_stream", + ), + ( + "azure.ai.projects.aio.operations", + "AgentsOperations", + "list_messages", + TraceType.AGENTS, + "list_messages", + ), ) return sync_apis, async_apis @@ -1369,13 +1466,13 @@ def _is_content_recording_enabled(self) -> bool: class _AgentEventHandlerTraceWrapper(AgentEventHandler): - def __init__(self, inner_handler: AgentEventHandler, instrumentor: AIAgentsInstrumentor, span: "AbstractSpan"): + def __init__(self, inner_handler: AgentEventHandler, instrumentor: _AIAgentsInstrumentorPreview, span: "AbstractSpan"): super().__init__() self.span = span self.inner_handler = inner_handler self.ended = False - self.last_run = None - self.last_message = None + self.last_run: Optional[ThreadRun] = None + self.last_message: Optional[ThreadMessage] = None self.instrumentor = instrumentor def on_message_delta(self, delta: "MessageDeltaChunk") -> None: @@ -1405,7 +1502,7 @@ def on_run_step(self, step: "RunStep") -> None: if step.type == "tool_calls" and isinstance(step.step_details, RunStepToolCallDetails): self.instrumentor._add_tool_assistant_message_event(self.span, step) elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: - self.instrumentor.add_thread_message_event(self.span, self.last_message, step.usage) + 
self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) self.last_message = None def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: @@ -1435,4 +1532,4 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) self.span.__exit__(exc_type, exc_val, exc_tb) - self.span.finish() \ No newline at end of file + self.span.finish() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py index 524ebad14e2f..6a17ffb45f0e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py @@ -61,39 +61,42 @@ class OperationName(Enum): def trace_tool_execution( tool_call_id: str, tool_name: str, - thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - agent_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow - run_id: Optional[str] = None # TODO: would be nice to have this, but need to propagate somehow -) -> "AbstractSpan": - span = start_span(OperationName.EXECUTE_TOOL, - server_address=None, - span_name=f"execute_tool {tool_name}", - thread_id=thread_id, - agent_id=agent_id, - run_id=run_id, - gen_ai_system=None) # it's a client code execution, not GenAI span + thread_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + agent_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow + run_id: Optional[str] = None, # TODO: would be nice to have this, but need to propagate somehow +) -> "Optional[AbstractSpan]": + span = start_span( + OperationName.EXECUTE_TOOL, + server_address=None, + span_name=f"execute_tool {tool_name}", + thread_id=thread_id, + agent_id=agent_id, + run_id=run_id, + gen_ai_system=None, + ) # it's a client code execution, not GenAI span if span is not None and span.span_instance.is_recording: span.add_attribute(GEN_AI_TOOL_CALL_ID, tool_call_id) span.add_attribute(GEN_AI_TOOL_NAME, tool_name) return span + def start_span( - operation_name: OperationName, - server_address: str, - span_name: str = None, - thread_id: str = None, - agent_id: str = None, - run_id: str = None, - model: str = None, - temperature: str = None, - top_p: str = None, - max_prompt_tokens: Optional[int] = None, - max_completion_tokens: Optional[int] = None, - response_format: Optional[str] = None, - gen_ai_system: str = AZ_AI_AGENT_SYSTEM, - kind: SpanKind = SpanKind.CLIENT -) -> "AbstractSpan": + operation_name: OperationName, + server_address: Optional[str], + span_name: Optional[str] = None, + thread_id: Optional[str] = None, + agent_id: Optional[str] = None, + run_id: Optional[str] = None, + model: Optional[str] = None, + temperature: Optional[float] = None, + top_p: Optional[float] = None, + max_prompt_tokens: Optional[int] = None, + max_completion_tokens: Optional[int] = None, + response_format: Optional[str] = None, + gen_ai_system: Optional[str] = AZ_AI_AGENT_SYSTEM, + kind: SpanKind = SpanKind.CLIENT, +) -> "Optional[AbstractSpan]": if _span_impl_type is None: return None @@ -121,10 +124,10 @@ def start_span( span.add_attribute(GEN_AI_REQUEST_MODEL, model) if temperature: - span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, temperature) + span.add_attribute(GEN_AI_REQUEST_TEMPERATURE, str(temperature)) if top_p: - 
span.add_attribute(GEN_AI_REQUEST_TOP_P, top_p) + span.add_attribute(GEN_AI_REQUEST_TOP_P, str(top_p)) if max_prompt_tokens: span.add_attribute(GEN_AI_REQUEST_MAX_INPUT_TOKENS, max_prompt_tokens) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py index 0de29ddd2a32..f5dc0a072846 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -36,6 +36,7 @@ tracer = trace.get_tracer(__name__) + @tracer.start_as_current_span(__file__) async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py index 51723d56917b..fded860098c4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py @@ -39,6 +39,7 @@ tracer = trace.get_tracer(__name__) + @tracer.start_as_current_span(__file__) async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py index fd1c27ff25da..224d8eb3a318 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -63,7 +63,9 @@ async def main(): print(f"Created thread, thread ID: {thread.id}") message = await project_client.agents.create_message( - thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?" 
+ thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", ) print(f"Created message, message ID: {message.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index 36664ed72a04..064d86c47138 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -84,4 +84,4 @@ async def main(): if __name__ == "__main__": - asyncio.run(main()) \ No newline at end of file + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py index f1f7e34cade3..f475d1c1ef93 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py @@ -61,7 +61,9 @@ thread = project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID: {message.id}") run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py index 5466568be70b..442a995786a1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_console_tracing.py @@ -61,7 +61,9 @@ thread = project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID: {message.id}") run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index c3fa5fb200a3..447afbe0acd2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -24,7 +24,7 @@ messages, which may contain personal data. False by default. 
""" -import os, sys,time, json +import os, sys, time, json from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential @@ -39,8 +39,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # Enable Azure Monitor tracing @@ -54,9 +53,10 @@ scenario = os.path.basename(__file__) tracer = trace.get_tracer(__name__) + # The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes # to the span in the function implementation. Note that this will trace the function parameters and their values. -@tracer.start_as_current_span("fetch_weather") # type: ignore +@tracer.start_as_current_span("fetch_weather") # type: ignore def fetch_weather(location: str) -> str: """ Fetches the weather information for the specified location. @@ -77,6 +77,7 @@ def fetch_weather(location: str) -> str: weather_json = json.dumps({"weather": weather}) return weather_json + # Statically defined user functions for fast reference user_functions: Set[Callable[..., Any]] = { fetch_weather, @@ -125,16 +126,20 @@ def fetch_weather(location: str) -> str: if isinstance(tool_call, RequiredFunctionToolCall): try: output = functions.execute(tool_call) - tool_outputs.append({ - "tool_call_id": tool_call.id, - "output": output, - }) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: - project_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) + project_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) print(f"Current run status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index b871f2297b83..b20d862ea2b4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -29,7 +29,7 @@ messages, which may contain personal data. False by default. """ -import os, sys,time, json +import os, sys, time, json from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential @@ -43,8 +43,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"] + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] ) # Enable console tracing @@ -55,9 +54,10 @@ scenario = os.path.basename(__file__) tracer = trace.get_tracer(__name__) + # The tracer.start_as_current_span decorator will trace the function call and enable adding additional attributes # to the span in the function implementation. Note that this will trace the function parameters and their values. 
-@tracer.start_as_current_span("fetch_weather") # type: ignore +@tracer.start_as_current_span("fetch_weather") # type: ignore def fetch_weather(location: str) -> str: """ Fetches the weather information for the specified location. @@ -78,6 +78,7 @@ def fetch_weather(location: str) -> str: weather_json = json.dumps({"weather": weather}) return weather_json + # Statically defined user functions for fast reference user_functions: Set[Callable[..., Any]] = { fetch_weather, @@ -126,16 +127,20 @@ def fetch_weather(location: str) -> str: if isinstance(tool_call, RequiredFunctionToolCall): try: output = functions.execute(tool_call) - tool_outputs.append({ - "tool_call_id": tool_call.id, - "output": output, - }) + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": output, + } + ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") print(f"Tool outputs: {tool_outputs}") if tool_outputs: - project_client.agents.submit_tool_outputs_to_run(thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs) + project_client.agents.submit_tool_outputs_to_run( + thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs + ) print(f"Current run status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py index bcd2428b9959..c033fc1df0dd 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -25,7 +25,7 @@ """ -import os,sys +import os, sys from azure.ai.projects import AIProjectClient from azure.ai.projects.models._enums import RunStepType from azure.identity import DefaultAzureCredential @@ -79,6 +79,7 @@ def on_done(self) -> None: def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + # Enable Azure Monitor tracing application_insights_connection_string = project_client.telemetry.get_connection_string() if not application_insights_connection_string: @@ -101,7 +102,9 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = project_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID {message.id}") with project_client.agents.create_stream( diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py index ab5b8f89e3a6..49320a9555b8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py @@ -29,7 +29,7 @@ messages, which may contain personal data. False by default. 
""" -import os,sys +import os, sys from azure.ai.projects import AIProjectClient from azure.ai.projects.models._enums import RunStepType from azure.identity import DefaultAzureCredential @@ -83,6 +83,7 @@ def on_done(self) -> None: def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + # Enable console tracing # or, if you have local OTLP endpoint running, change it to # project_client.telemetry.enable(destination="http://localhost:4317") @@ -102,7 +103,9 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: thread = project_client.agents.create_thread() print(f"Created thread, thread ID {thread.id}") - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) print(f"Created message, message ID {message.id}") with project_client.agents.create_stream( diff --git a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py index 27304bf1a90c..0dfada80689b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/user_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/user_functions.py @@ -19,7 +19,7 @@ def fetch_current_datetime(format: Optional[str] = None) -> str: :rtype: str """ current_time = datetime.datetime.now() - + # Use the provided format if available, else use a default format if format: time_format = format @@ -68,7 +68,7 @@ def send_email(recipient: str, subject: str, body: str) -> str: def calculate_sum(a: int, b: int) -> str: """Calculates the sum of two integers. - + :param a (int): First integer. :rtype: int :param b (int): Second integer. @@ -83,20 +83,20 @@ def calculate_sum(a: int, b: int) -> str: def convert_temperature(celsius: float) -> str: """Converts temperature from Celsius to Fahrenheit. - + :param celsius (float): Temperature in Celsius. :rtype: float :return: Temperature in Fahrenheit. :rtype: str """ - fahrenheit = (celsius * 9/5) + 32 + fahrenheit = (celsius * 9 / 5) + 32 return json.dumps({"fahrenheit": fahrenheit}) def toggle_flag(flag: bool) -> str: """Toggles a boolean flag. - + :param flag (bool): The flag to toggle. :rtype: bool @@ -109,7 +109,7 @@ def toggle_flag(flag: bool) -> str: def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: """Merges two dictionaries. - + :param dict1 (Dict[str, Any]): First dictionary. :rtype: dict :param dict2 (Dict[str, Any]): Second dictionary. @@ -125,7 +125,7 @@ def merge_dicts(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> str: def get_user_info(user_id: int) -> str: """Retrieves user information based on user ID. - + :param user_id (int): ID of the user. :rtype: int @@ -143,7 +143,7 @@ def get_user_info(user_id: int) -> str: def longest_word_in_sentences(sentences: List[str]) -> str: """Finds the longest word in each sentence. - + :param sentences (List[str]): A list of sentences. :return: A JSON string mapping each sentence to its longest word. 
:rtype: str diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 124eedeb8211..83563a1d4f04 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -133,7 +133,9 @@ async def sample_connections_async(): else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = await client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = await client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) await client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index fd4b68cf38e2..626579139367 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -43,7 +43,7 @@ ) with project_client: - + # List the properties of all connections connections = project_client.connections.list() print(f"====> Listing of all connections (found {len(connections)}):") @@ -123,6 +123,8 @@ else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index 16144f785905..5fade8dfd3cd 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -39,7 +39,9 @@ async def sample_get_chat_completions_client_async(): async with await project_client.inference.get_chat_completions_client() as client: - response = await client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = await client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py index 474d1a76ecff..10d0baaeec44 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py @@ -39,7 +39,9 @@ async def sample_get_embeddings_client_async(): # Get an authenticated async azure.ai.inference embeddings 
client for your default Serverless connection: async with await project_client.inference.get_embeddings_client() as client: - response = await client.embed(model=model_deployment_name, input=["first phrase", "second phrase", "third phrase"]) + response = await client.embed( + model=model_deployment_name, input=["first phrase", "second phrase", "third phrase"] + ) for item in response.data: length = len(item.embedding) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py index 9e3ef43786a4..28311297da67 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client.py @@ -36,6 +36,8 @@ with project_client.inference.get_chat_completions_client() as client: - response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py index 005325ec02cc..55e282ad57f4 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing.py @@ -52,6 +52,8 @@ # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: - response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index 6dcb00ec380a..bb7254a594e8 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -50,6 +50,8 @@ # Get an authenticated azure.ai.inference ChatCompletionsClient for your default Serverless connection: with project_client.inference.get_chat_completions_client() as client: - response = client.complete(model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")]) + response = client.complete( + model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] + ) print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index ffd39a474a6f..dd9010f2f644 100644 --- 
a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,4 +1,7 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines +# pylint: disable=too-many-lines +# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -166,10 +169,10 @@ def test_create_update_delete_agent(self, **kwargs): assert agent.id print("Created agent, agent ID", agent.id) - # update agent + # update agent agent = client.agents.update_agent(agent.id, name="my-agent2", instructions="You are helpful agent") assert agent.name == "my-agent2" - + # delete agent and close client client.agents.delete_agent(agent.id) print("Deleted agent") @@ -200,7 +203,7 @@ def test_create_agent_with_tools(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - + # test agent creation with tools @agentClientPreparer() @recorded_by_proxy @@ -225,7 +228,7 @@ def test_create_agent_with_tools_and_resources(self, **kwargs): # delete agent and close client client.agents.delete_agent(agent.id) print("Deleted agent") - client.close() + client.close() @agentClientPreparer() @recorded_by_proxy @@ -1104,8 +1107,7 @@ def test_get_run_step(self, **kwargs): client.agents.delete_agent(agent.id) print("Deleted agent") client.close() - - + # test agent creation with invalid tool resource @agentClientPreparer() @recorded_by_proxy @@ -1120,12 +1122,19 @@ def test_create_agent_with_invalid_code_interpreter_tool_resource(self, **kwargs exception_message = "" try: client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=[], tool_resources=tool_resources + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + tools=[], + tool_resources=tool_resources, ) except ValueError as e: exception_message = e.args[0] - assert exception_message == "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + assert ( + exception_message + == "Tools must contain a CodeInterpreterToolDefinition when tool_resources.code_interpreter is provided" + ) # test agent creation with invalid tool resource @agentClientPreparer() @@ -1141,12 +1150,19 @@ def test_create_agent_with_invalid_file_search_tool_resource(self, **kwargs): exception_message = "" try: client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=[], tool_resources=tool_resources + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + tools=[], + tool_resources=tool_resources, ) except ValueError as e: exception_message = e.args[0] - assert exception_message == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + assert ( + exception_message + == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" + ) @agentClientPreparer() diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index d4a6f6cfdf14..b675602308e0 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -20,13 +20,13 @@ def test_connections_get(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - assert project_client.connections.get( - connection_name="Some non-existing name", 
with_credentials=False - ) == None + assert ( + project_client.connections.get(connection_name="Some non-existing name", with_credentials=False) == None + ) - assert project_client.connections.get( - connection_name="Some non-existing name", with_credentials=True - ) == None + assert ( + project_client.connections.get(connection_name="Some non-existing name", with_credentials=True) == None + ) connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) @@ -75,13 +75,15 @@ def test_connections_get_default(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - assert project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=False - ) == None + assert ( + project_client.connections.get_default(connection_type="Some unrecognized type", with_credentials=False) + == None + ) - assert project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=True - ) == None + assert ( + project_client.connections.get_default(connection_type="Some unrecognized type", with_credentials=True) + == None + ) connection = project_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 32b65a09edf1..4b6699196e81 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -19,13 +19,15 @@ async def test_connections_get_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - assert await project_client.connections.get( - connection_name="Some non-existing name", with_credentials=False - ) == None + assert ( + await project_client.connections.get(connection_name="Some non-existing name", with_credentials=False) + == None + ) - assert await project_client.connections.get( - connection_name="Some non-existing name", with_credentials=True - ) == None + assert ( + await project_client.connections.get(connection_name="Some non-existing name", with_credentials=True) + == None + ) connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) @@ -78,13 +80,19 @@ async def test_connections_get_default_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - assert await project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=False - ) == None + assert ( + await project_client.connections.get_default( + connection_type="Some unrecognized type", with_credentials=False + ) + == None + ) - assert await project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=True - ) == None + assert ( + await project_client.connections.get_default( + connection_type="Some unrecognized type", with_credentials=True + ) + == None + ) connection = await project_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index d94519b2324f..835980ec7f7a 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ 
b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -29,11 +29,9 @@ class TestConnectionsUnitTests(ConnectionsTestBase): def test_sas_token_credential_class_mocked(self, **kwargs): import jwt - import datetime import time # Create a simple JWT with 10 seconds expiration time - token_duration_sec = 5 secret_key = "my_secret_key" token_duration_sec = 5 sas_token_expiration: datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( @@ -70,7 +68,7 @@ def test_sas_token_credential_class_mocked(self, **kwargs): for _ in range(token_duration_sec + 2): print("Looping...") time.sleep(1) - access_token = sas_token_credential.get_token() + sas_token_credential.get_token() except HttpResponseError as e: exception_caught = True print(e) diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index 38163e0070e6..b6eb42474899 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -18,9 +18,7 @@ def test_inference_get_azure_openai_client(self, **kwargs): model = kwargs.pop("azure_ai_projects_inference_tests_aoai_model_deployment_name") with self.get_sync_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - with project_client.inference.get_azure_openai_client( - api_version=api_version - ) as azure_openai_client: + with project_client.inference.get_azure_openai_client(api_version=api_version) as azure_openai_client: response = azure_openai_client.chat.completions.create( messages=[ { @@ -45,7 +43,7 @@ def test_inference_get_chat_completions_client(self, **kwargs): messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), - ] + ], ) pprint.pprint(response) contains = ["5280", "5,280"] diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 3e0507edf2a8..75d386e0dc8e 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -45,7 +45,7 @@ async def test_inference_get_chat_completions_client_async(self, **kwargs): messages=[ SystemMessage(content="You are a helpful assistant."), UserMessage(content="How many feet are in a mile?"), - ] + ], ) pprint.pprint(response) contains = ["5280", "5,280"] diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 47e0eca91f0a..325c3e32605b 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: fc8aca891c34ecf74b4b2ca226f1dd4a84c8a6e3 +commit: bf15d7700a3eb48e9c5663b92837fae280923405 repo: Azure/azure-rest-api-specs additionalDirectories: From b8ba93e869aaad5098841a4792a6bef1f74330a4 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:05:48 -0800 Subject: [PATCH 077/138] Fix handling of SAS token and fix mypy (#38312) * Fix handling of SAS token and fix mypy * typo * Fix also pylance issue --- .../azure/ai/projects/models/_models.py | 8 ++- .../azure/ai/projects/models/_patch.py | 11 ++-- .../test_connections_unit_tests.py | 53 +++++++++++++++++-- 3 files changed, 64 insertions(+), 8 deletions(-) diff --git 
a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 56eb498f3e62..7ea2ea5cfcb9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1364,7 +1364,13 @@ class InternalConnectionProperties(_model_base.Model): __mapping__: Dict[str, _model_base.Model] = {} auth_type: str = rest_discriminator(name="authType") """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" + and \"SAS\".""" + # Will be generated + category: Union[str, "_models.ConnectionType"] = rest_field() + """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", + \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" + target: str = rest_field() + """The connection URL to be used for this service. Required.""" class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discriminator="AAD"): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index e356228ecf7b..e1e8667e8ec2 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -88,7 +88,7 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st return new_params -def _safe_instantiate(model_class: Type, parameters: Dict[str, Any]) -> Any: +def _safe_instantiate(model_class: Type, parameters: Union[str, Dict[str, Any]]) -> Any: """ Instantiate class with the set of parameters from the server. @@ -126,7 +126,7 @@ def __init__( self.id = connection.id self.name = connection.name self.authentication_type = connection.properties.auth_type - self.connection_type = connection.properties.category + self.connection_type = cast(ConnectionType, connection.properties.category) self.endpoint_url = ( connection.properties.target[:-1] if connection.properties.target.endswith("/") @@ -223,7 +223,10 @@ def _refresh_token(self) -> None: connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True) - self._sas_token = connection.properties.credentials.sas + self._sas_token = "" + if connection.token_credential is not None: + sas_credential = cast(SASTokenCredential, connection.token_credential) + self._sas_token = sas_credential._sas_token self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) logger.debug("[SASTokenCredential._refresh_token] Exit. 
New token expires on %s.", self._expires_on) @@ -897,7 +900,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: raise ValueError("Event type not specified in the event data.") try: - parsed_data = json.loads(event_data) + parsed_data: Union[str, Dict[str, Any]] = cast(Dict[str, Any], json.loads(event_data)) except json.JSONDecodeError: parsed_data = event_data diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index 835980ec7f7a..8be0e6de97c2 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -4,10 +4,16 @@ # ------------------------------------ # cSpell:disable import datetime +import jwt +import time + from azure.ai.projects.models import SASTokenCredential from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError from connection_test_base import ConnectionsTestBase +from azure.ai.projects.models._patch import ConnectionProperties +from azure.ai.projects.models._models import GetConnectionResponse +from unittest.mock import MagicMock, patch class FakeTokenCredential(TokenCredential): @@ -28,9 +34,6 @@ class TestConnectionsUnitTests(ConnectionsTestBase): # ********************************************************************************** def test_sas_token_credential_class_mocked(self, **kwargs): - import jwt - import time - # Create a simple JWT with 5 seconds expiration time secret_key = "my_secret_key" token_duration_sec = 5 @@ -74,6 +77,50 @@ def test_sas_token_credential_class_mocked(self, **kwargs): print(e) assert exception_caught + def _get_fake_token(self, expiration): + """Return the fake SAS token.""" + secret_key = "my_secret_key" + payload = {"exp": expiration} + sas_token = jwt.encode(payload, secret_key) + return SASTokenCredential( + sas_token=sas_token, + credential=FakeTokenCredential(), + subscription_id="fake_subscription_id", + resource_group_name="fake_resource_group", + project_name="fake_project_name", + connection_name="fake_connection_name", + ) + + def test_mock_subscription_refresh_token(self): + """Test refreshing token with mock subscription""" + token_duration_sec = 5 + # Let our token be already expired.
+ sas_token_expiration: datetime = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( + seconds=token_duration_sec + ) + sas_token_expiration = sas_token_expiration.replace(microsecond=0) + sas_token_credential = self._get_fake_token(sas_token_expiration) + assert sas_token_credential._expires_on == sas_token_expiration + new_expiration: datetime = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta( + seconds=token_duration_sec + ) + new_token_credential = self._get_fake_token(new_expiration) + + mock_properties = MagicMock() + mock_properties.auth_type = 'sas_token' + mock_properties.category = 'fake_category' + mock_properties.target = 'microsoft.com' + mock_properties.credentials.key='very secret key' + conn_resp = GetConnectionResponse( + id = '12334', + name = 'Fake_connection', + properties = mock_properties + ) + conn = ConnectionProperties(connection=conn_resp, token_credential=new_token_credential) + with patch('azure.ai.projects.operations.ConnectionsOperations.get', return_value=conn): + new_token = sas_token_credential.get_token() + assert new_token.expires_on == int(new_expiration.timestamp()) + # Unit tests for the SASTokenCredential class def test_sas_token_credential_class_real(self, **kwargs): From a1b6f4659e170e9320b860de5a08e4a5d4295d13 Mon Sep 17 00:00:00 2001 From: Glenn Harper <64209257+glharper@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:35:27 -0800 Subject: [PATCH 078/138] Glharper/ai client new tools (#38202) * [AI Client] Add new search tools * add hooks to update connection lists for bing search * use updated typespec * use ConnectionType and ConnectionsOperations * endpointOperations -> ConnectionsOperations * add AzureAISearch tool def * adapt project with new tool changes * Add new tool defs for bing and azure ai search to projects.models * use service schema for BingGroundingTool * add samples for new tools * add sharepoint tool * update prompt to use bing browsing * add sharepoint_grounding discriminator * code gen from merged typespec * update to latest typespec * review feedback * add preview header to bing grounding and azure AI search samples --- .../async_samples/sample_endpoints_async.py | 132 ++++++++++++++++++ .../azure/ai/projects/models/_patch.py | 114 +++++++++++++++ .../azure/ai/projects/operations/_patch.py | 1 + .../agents/sample_agents_azure_ai_search.py | 88 ++++++++++++ .../agents/sample_agents_bing_grounding.py | 85 +++++++++++ 5 files changed, 420 insertions(+) create mode 100644 sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py diff --git a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py new file mode 100644 index 000000000000..83744642673a --- /dev/null +++ b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py @@ -0,0 +1,132 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_endpoints_async.py + +DESCRIPTION: + Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate endpoints + and get endpoint properties.
+ +USAGE: + python sample_endpoints_async.py + + Before running the sample: + + pip install azure.ai.client aiohttp azure-identity + + Set the environment variables with your own values: + 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import asyncio +import os +from azure.ai.client.aio import AzureAIClient +from azure.ai.client.models import ConnectionType, AuthenticationType +from azure.identity import DefaultAzureCredential + + +async def sample_endpoints_async(): + + # Create an Azure AI Client from a connection string, copied from your AI Studio project. + # It should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>" + async with AzureAIClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], + ) as ai_client: + + # List all endpoints of a particular "type", with or without their credentials: + print("====> Listing of all Azure Open AI endpoints:") + async for endpoint in ai_client.endpoints.list( + endpoint_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. + populate_secrets=True, # Optional. Defaults to "False" + ): + print(endpoint) + + # Get the default endpoint of a particular "type" (note that since at the moment the service + # does not have a notion of a default endpoint, this will return the first endpoint of that type): + endpoint = await ai_client.endpoints.get_default( + endpoint_type=ConnectionType.AZURE_OPEN_AI, + populate_secrets=True, # Required. # Optional. Defaults to "False" + ) + print("====> Get default Azure Open AI endpoint:") + print(endpoint) + + # Get an endpoint by its name: + endpoint = await ai_client.endpoints.get( + endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required.
+ ) + print("====> Get endpoint by name:") + print(endpoint) + + # Examples of how you would create Inference client + if endpoint.endpoint_type == ConnectionType.AZURE_OPEN_AI: + + from openai import AsyncAzureOpenAI + + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating AzureOpenAI client using API key authentication") + client = AsyncAzureOpenAI( + api_key=endpoint.key, + azure_endpoint=endpoint.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + ) + elif endpoint.authentication_type == AuthenticationType.AAD: + print("====> Creating AzureOpenAI client using Entra ID authentication") + from azure.identity import get_bearer_token_provider + + client = AsyncAzureOpenAI( + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + azure_ad_token_provider=get_bearer_token_provider( + endpoint.token_credential, "https://cognitiveservices.azure.com/.default" + ), + azure_endpoint=endpoint.endpoint_url, + api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs + ) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") + + response = await client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], + ) + print(response.choices[0].message.content) + + elif endpoint.endpoint_type == ConnectionType.SERVERLESS: + + from azure.ai.inference.aio import ChatCompletionsClient + from azure.ai.inference.models import UserMessage + + if endpoint.authentication_type == AuthenticationType.API_KEY: + print("====> Creating ChatCompletionsClient using API key authentication") + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) + elif endpoint.authentication_type == AuthenticationType.AAD: + # MaaS models do not yet support EntraID auth + print("====> Creating ChatCompletionsClient using Entra ID authentication") + client = ChatCompletionsClient( + endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential + ) + else: + raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") + + response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) + await client.close() + print(response.choices[0].message.content) + + +async def main(): + await sample_endpoints_async() + + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index e1e8667e8ec2..35d32436a4c8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -33,6 +33,13 @@ ToolResources, FileSearchToolDefinition, FileSearchToolResource, + BingGroundingToolDefinition, + SharepointToolDefinition, + ToolConnection, + ToolConnectionList, + AzureAISearchResource, + IndexResource, + AzureAISearchToolDefinition, CodeInterpreterToolDefinition, CodeInterpreterToolResource, RequiredFunctionToolCall, @@ -486,6 +493,110 @@ async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: return json.dumps({"error": 
error_message}) +class AzureAISearchTool(Tool): + """ + A tool that searches for information using Azure AI Search. + """ + + def __init__(self): + self.index_list = [] + + def add_index(self, index: str): + """ + Add an index ID to the list of indices used to search. + """ + # TODO + self.index_list.append(IndexResource(index_connection_id=index)) + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the Azure AI search tool definitions. + """ + return [AzureAISearchToolDefinition()] + + @property + def resources(self) -> ToolResources: + """ + Get the Azure AI search resources. + """ + return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list)) + + def execute(self, tool_call: Any) -> Any: + pass + + +class ConnectionTool(Tool): + """ + A tool that requires connection ids. + Used as base class for Bing Grounding, Sharepoint, and Microsoft Fabric + """ + + def __init__(self, connection_id: str): + """ + Initialize ConnectionTool with a connection_id. + + :param connection_id: Connection ID used by tool. All connection tools allow only one connection. + """ + self.connection_ids = [ToolConnection(connection_id=connection_id)] + + @property + def resources(self) -> ToolResources: + """ + Get the connection tool resources. + """ + return ToolResources() + + def execute(self, tool_call: Any) -> Any: + pass + + +class BingGroundingTool(ConnectionTool): + """ + A tool that searches for information using Bing. + """ + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the Bing grounding tool definitions. + """ + return [BingGroundingToolDefinition(bing_grounding=ToolConnectionList(connection_list=self.connection_ids))] + + +class SharepointTool(ConnectionTool): + """ + A tool that searches for information using Sharepoint. + """ + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the Sharepoint tool definitions. + """ + return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))] + + +""" + def updateConnections(self, connection_list: List[Tuple[str, str]]) -> None: +# use connection_list to auto-update connections for bing search tool if no pre-existing + if self.connection_ids.__len__() == 0: + for id, connection_type in connection_list: + if connection_type == "ApiKey": + self.connection_ids.append(id) + return +""" + + +class FileSearchTool(Tool): + """ + A tool that searches for uploaded file information from the created vector stores. + """ + + def __init__(self, vector_store_ids: List[str] = []): + self.vector_store_ids = vector_store_ids + + class FileSearchTool(Tool): """ A tool that searches for uploaded file information from the created vector stores. @@ -1240,6 +1351,9 @@ def get_last_text_message_by_sender(self, sender: str) -> Optional[MessageTextCo "ThreadMessages", "FileSearchTool", "FunctionTool", + "BingGroundingTool", + "SharepointTool", + "AzureAISearchTool", "SASTokenCredential", "Tool", "ToolSet", diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 0f4c10b14c2f..32f453154db1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
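For orientation before the samples below: a minimal sketch of how the connection-backed tools added above compose, as of this patch (the connection ids are placeholders; only classes exported by this change are used):

    # Sketch: each Tool contributes ToolDefinition objects, and some also carry ToolResources.
    from azure.ai.projects.models import AzureAISearchTool, BingGroundingTool

    bing = BingGroundingTool(connection_id="<bing-connection-id>")  # connection tools take exactly one connection
    ai_search = AzureAISearchTool()
    ai_search.add_index("<search-connection-id>")  # wrapped in an IndexResource internally

    definitions = bing.definitions + ai_search.definitions  # suitable for create_agent(tools=...)
    resources = ai_search.resources  # ToolResources carrying the AzureAISearchResource; Bing adds none

The full end-to-end flow appears in the two samples that follow.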
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py new file mode 100644 index 000000000000..ca600f610302 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -0,0 +1,88 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_azure_ai_search.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with the + Azure AI Search tool from the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_azure_ai_search.py + + Before running the sample: + + pip install azure.ai.projects azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import AzureAISearchTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>" +# Customer needs to log in to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +conn_list = project_client.connections.list() +conn_id = "" +for conn in conn_list: + if conn.connection_type == "CognitiveSearch": + conn_id = conn.id + break + +print(conn_id) + +# Initialize agent AI search tool and add the search index connection id +ai_search = AzureAISearchTool() +ai_search.add_index(conn_id) + +# Create agent with AI search tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, send an email with the datetime and weather information in New York?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py new file mode 100644 index 000000000000..12384b648563 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py @@ -0,0 +1,85 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_agents_bing_grounding.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with the Bing grounding tool from + the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_bing_grounding.py + + Before running the sample: + + pip install azure.ai.projects azure-identity + + Set these environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import BingGroundingTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>" +# Customer needs to log in to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +bing_connection = project_client.connections.get( + connection_name=os.environ["BING_CONNECTION_NAME"] +) +conn_id = bing_connection.id + +print(conn_id) + +# Initialize agent bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create agent with the bing tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + headers={"x-ms-enable-preview": "true"} + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="How does wikipedia explain Euler's Identity?", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") From 4cc537c0a1e210cad84787215ef4de31442162a1 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 5 Nov 2024 12:31:18 -0800 Subject: [PATCH 079/138] Use 'Union' instead of '|' to make it run on Python 3.8 and up (#38344) --- .../azure/ai/projects/aio/operations/_patch.py | 4 ++-- .../azure-ai-projects/azure/ai/projects/operations/_patch.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 58220a597f31..0963cee4613d --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -175,7 +175,7 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace_async - async
def get_azure_openai_client(self, *, api_version: str | None = None, **kwargs) -> "AsyncAzureOpenAI": + async def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwargs) -> "AsyncAzureOpenAI": """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. @@ -321,7 +321,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k @distributed_trace_async async def list( - self, *, connection_type: ConnectionType | None = None, **kwargs: Any + self, *, connection_type: Union[ConnectionType, None] = None, **kwargs: Any ) -> Iterable[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 32f453154db1..eac6665e9878 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -183,7 +183,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace - def get_azure_openai_client(self, *, api_version: str | None = None, **kwargs) -> "AzureOpenAI": + def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwargs) -> "AzureOpenAI": """Get an authenticated AzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. @@ -329,7 +329,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: return ConnectionProperties(connection=connection) @distributed_trace - def list(self, *, connection_type: ConnectionType | None = None, **kwargs: Any) -> Iterable[ConnectionProperties]: + def list(self, *, connection_type: Union[ConnectionType, None] = None, **kwargs: Any) -> Iterable[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. From 30853589f961816c2de10e9c3fc2ddaa3c8ef78f Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Tue, 5 Nov 2024 17:12:50 -0800 Subject: [PATCH 080/138] Fix mypy issues and regenerate the code.
(#38340) * Add more mypy fixes * Fix imports * Fix * Add safe conversion method * Fix mypy * Merge to main * Fix mypy * Fix unit test * Sphinx fix * Fix cspell --- .../async_samples/sample_endpoints_async.py | 132 ------------------ .../azure/ai/projects/_patch.py | 18 +-- .../azure/ai/projects/_types.py | 1 + .../azure/ai/projects/aio/_patch.py | 28 ++-- .../ai/projects/aio/operations/_patch.py | 100 ++++++------- .../azure/ai/projects/models/_models.py | 47 +++---- .../azure/ai/projects/models/_patch.py | 24 ++-- .../azure/ai/projects/operations/_patch.py | 82 ++++++----- .../agents/_ai_agents_instrumentor.py | 61 +++++--- .../tests/agents/test_agents_client.py | 39 +++--- .../test_connections_unit_tests.py | 18 +-- .../telemetry/test_ai_agents_instrumentor.py | 31 ++++ sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 13 files changed, 251 insertions(+), 332 deletions(-) delete mode 100644 sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py diff --git a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py b/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py deleted file mode 100644 index 83744642673a..000000000000 --- a/sdk/ai/azure-ai-client/samples/endpoints/async_samples/sample_endpoints_async.py +++ /dev/null @@ -1,132 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ - -""" -FILE: sample_endpoints_async.py - -DESCRIPTION: - Given an asynchronous AzureAIClient, this sample demonstrates how to enumerate endpoints - and get endpoint properties. - -USAGE: - python sample_endpoints_async.py - - Before running the sample: - - pip install azure.ai.client aiohttp azure-identity - - Set the environment variables with your own values: - 1) AI_CLIENT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. -""" - -import asyncio -import os -from azure.ai.client.aio import AzureAIClient -from azure.ai.client.models import ConnectionType, AuthenticationType -from azure.identity import DefaultAzureCredential - - -async def sample_endpoints_async(): - - # Create an Azure AI Client from a connection string, copied from your AI Studio project. - # It should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>" - async with AzureAIClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["AI_CLIENT_CONNECTION_STRING"], - ) as ai_client: - - # List all endpoints of a particular "type", with or without their credentials: - print("====> Listing of all Azure Open AI endpoints:") - async for endpoint in ai_client.endpoints.list( - endpoint_type=ConnectionType.AZURE_OPEN_AI, # Optional. Defaults to all types. - populate_secrets=True, # Optional. Defaults to "False" - ): - print(endpoint) - - # Get the default endpoint of a particular "type" (note that since at the moment the service - # does not have a notion of a default endpoint, this will return the first endpoint of that type): - endpoint = await ai_client.endpoints.get_default( - endpoint_type=ConnectionType.AZURE_OPEN_AI, - populate_secrets=True, # Required. # Optional.
Defaults to "False" - ) - print("====> Get default Azure Open AI endpoint:") - print(endpoint) - - # Get an endpoint by its name: - endpoint = await ai_client.endpoints.get( - endpoint_name=os.environ["AI_CLIENT_ENDPOINT_NAME"], populate_secrets=True # Required. - ) - print("====> Get endpoint by name:") - print(endpoint) - - # Examples of how you would create Inference client - if endpoint.endpoint_type == ConnectionType.AZURE_OPEN_AI: - - from openai import AsyncAzureOpenAI - - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating AzureOpenAI client using API key authentication") - client = AsyncAzureOpenAI( - api_key=endpoint.key, - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - elif endpoint.authentication_type == AuthenticationType.AAD: - print("====> Creating AzureOpenAI client using Entra ID authentication") - from azure.identity import get_bearer_token_provider - - client = AsyncAzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider - azure_ad_token_provider=get_bearer_token_provider( - endpoint.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=endpoint.endpoint_url, - api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs - ) - else: - raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") - - response = await client.chat.completions.create( - model="gpt-4o", - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - }, - ], - ) - print(response.choices[0].message.content) - - elif endpoint.endpoint_type == ConnectionType.SERVERLESS: - - from azure.ai.inference.aio import ChatCompletionsClient - from azure.ai.inference.models import UserMessage - - if endpoint.authentication_type == AuthenticationType.API_KEY: - print("====> Creating ChatCompletionsClient using API key authentication") - from azure.core.credentials import AzureKeyCredential - - client = ChatCompletionsClient(endpoint=endpoint.endpoint_url, credential=AzureKeyCredential(endpoint.key)) - elif endpoint.authentication_type == AuthenticationType.AAD: - # MaaS models do not yet support EntraID auth - print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=endpoint.endpoint_url, credential=endpoint.properties.token_credential - ) - else: - raise ValueError(f"Authentication type {endpoint.authentication_type} not supported.") - - response = await client.complete(messages=[UserMessage(content="How many feet are in a mile?")]) - await client.close() - print(response.choices[0].message.content) - - -async def main(): - await sample_endpoints_async() - - -if __name__ == "__main__": - asyncio.run(main()) - diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 37be4bcc06d4..05c6e449eca7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -57,7 +57,7 @@ def __init__( # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. 
It will have # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long - self._config0 = AIProjectClientConfiguration( + self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -85,11 +85,11 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, self._config0.http_logging_policy, ] - self._client0 = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) + self._client0: PipelineClient = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) # For Endpoints operations (listing connections, getting connection properties, getting project properties) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config1 = AIProjectClientConfiguration( + self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -116,7 +116,7 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, self._config1.http_logging_policy, ] - self._client1 = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + self._client1: PipelineClient = PipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long @@ -147,7 +147,7 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, self._config2.http_logging_policy, ] - self._client2 = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + self._client2: PipelineClient = PipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations # cSpell:disable-next-line @@ -179,7 +179,7 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, self._config3.http_logging_policy, ] - self._client3 = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + self._client3: PipelineClient = PipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) self._serialize = Serializer() self._deserialize = Deserializer() @@ -240,9 +240,9 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: :rtype: str """ try: - from azure.ai.ml import MLClient - from azure.ai.ml.entities import Data - from azure.ai.ml.constants import AssetTypes + from azure.ai.ml import MLClient # type: ignore + from azure.ai.ml.entities import Data # type: ignore + from azure.ai.ml.constants import AssetTypes # type: ignore except ImportError: raise ImportError( "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py index b540a961b2f1..c438829bda41 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py @@ -10,6 +10,7 @@ if TYPE_CHECKING: from . import models as _models + from .. import models as _models AgentsApiResponseFormatOption = Union[ str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" ] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 5905071bcf97..14b3f16af77f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -9,7 +9,7 @@ import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict +from typing import List, Any, Union, Dict, TYPE_CHECKING from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from typing_extensions import Self @@ -20,6 +20,9 @@ from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + class AIProjectClient(ClientGenerated): @@ -57,7 +60,7 @@ def __init__( # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long - self._config0 = AIProjectClientConfiguration( + self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -85,11 +88,11 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs0) if self._config0.redirect_policy else None, self._config0.http_logging_policy, ] - self._client0 = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) + self._client0: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) # For Endpoints operations (enumerating connections, getting SAS tokens) _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config1 = AIProjectClientConfiguration( + self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -116,11 +119,11 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs1) if self._config1.redirect_policy else None, self._config1.http_logging_policy, ] - self._client1 = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) + self._client1: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint1, policies=_policies1, **kwargs1) # For Agents operations _endpoint2 = f"{endpoint}/agents/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - 
self._config2 = AIProjectClientConfiguration( + self._config2: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -147,11 +150,12 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs2) if self._config2.redirect_policy else None, self._config2.http_logging_policy, ] - self._client2 = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) + self._client2: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint2, policies=_policies2, **kwargs2) # For Cloud Evaluations operations + # cSpell:disable-next-line _endpoint3 = f"{endpoint}/raisvc/v1.0/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long - self._config3 = AIProjectClientConfiguration( + self._config3: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, resource_group_name=resource_group_name, @@ -178,7 +182,7 @@ def __init__( policies.SensitiveHeaderCleanupPolicy(**kwargs3) if self._config3.redirect_policy else None, self._config3.http_logging_policy, ] - self._client3 = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) + self._client3: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint3, policies=_policies3, **kwargs3) self._serialize = Serializer() self._deserialize = Deserializer() @@ -239,9 +243,9 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: :rtype: str """ try: - from azure.ai.ml import MLClient - from azure.ai.ml.entities import Data - from azure.ai.ml.constants import AssetTypes + from azure.ai.ml import MLClient # type: ignore + from azure.ai.ml.entities import Data # type: ignore + from azure.ai.ml.constants import AssetTypes # type: ignore except ImportError: raise ImportError( "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 0963cee4613d..c6fb912d69b0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -15,9 +15,21 @@ from pathlib import Path from azure.core.exceptions import ResourceNotFoundError from io import TextIOWrapper -from typing import IO, Any, AsyncIterator, Dict, List, Iterable, MutableMapping, Optional, Union, cast, overload +from typing import ( + IO, + Any, + AsyncIterator, + Dict, + List, + MutableMapping, + Optional, + Union, + cast, + overload, + Sequence, + TYPE_CHECKING, +) -from azure.ai.projects import _types from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated from ._operations import AgentsOperations as AgentsOperationsGenerated from ._operations import TelemetryOperations as TelemetryOperationsGenerated @@ -28,11 +40,20 @@ ListConnectionsResponse, GetAppInsightsResponse, GetWorkspaceResponse, + InternalConnectionPropertiesSASAuth, ) from ... 
import models as _models from ...operations._patch import _enable_telemetry from azure.core.tracing.decorator_async import distributed_trace_async + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.ai.projects import _types + from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient + from openai import AsyncAzureOpenAI + from azure.identity import get_bearer_token_provider + logger = logging.getLogger(__name__) JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object @@ -45,7 +66,7 @@ def __init__(self, outer_instance): self._outer_instance = outer_instance @distributed_trace_async - async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": + async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClient]": """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. @@ -110,7 +131,7 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" return client @distributed_trace_async - async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": + async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. @@ -175,7 +196,7 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace_async - async def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwargs) -> "AsyncAzureOpenAI": + async def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs) -> "AsyncAzureOpenAI": """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. @@ -207,16 +228,22 @@ async def get_azure_openai_client(self, *, api_version: Union[str, None] = None, client = AsyncAzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) - elif connection.authentication_type == AuthenticationType.AAD: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" - ) + elif ( + connection.authentication_type == AuthenticationType.AAD + or connection.authentication_type == AuthenticationType.SAS + ): + try: from azure.identity import get_bearer_token_provider except ModuleNotFoundError as _: raise ModuleNotFoundError( "azure.identity package not installed. 
Please install it using 'pip install azure-identity'" ) + if connection.authentication_type == AuthenticationType.AAD: + auth = "Creating AzureOpenAI using Entra ID authentication" + else: + auth = "Creating AzureOpenAI using SAS authentication" + logger.debug(f"[InferenceOperations.get_azure_openai_client] {auth}") client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( @@ -225,15 +252,6 @@ async def get_azure_openai_client(self, *, api_version: Union[str, None] = None, azure_endpoint=connection.endpoint_url, api_version=api_version, ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") - client = AsyncAzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) else: raise ValueError("Unknown authentication type") @@ -245,7 +263,7 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace_async async def get_default( self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: + ) -> Optional[ConnectionProperties]: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. @@ -274,7 +292,9 @@ async def get_default( return None @distributed_trace_async - async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + async def get( + self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any + ) -> Optional[ConnectionProperties]: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. @@ -301,8 +321,10 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k elif connection.properties.auth_type == AuthenticationType.SAS: from ...models._patch import SASTokenCredential + cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) + token_credential = SASTokenCredential( - sas_token=connection.properties.credentials.sas, + sas_token=cred_prop.credentials.sas, credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, @@ -321,8 +343,8 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k @distributed_trace_async async def list( - self, *, connection_type: Union[ConnectionType, None] = None, **kwargs: Any - ) -> Iterable[ConnectionProperties]: + self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any + ) -> Sequence[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. @@ -354,7 +376,7 @@ def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") super().__init__(*args, **kwargs) - async def get_connection_string(self) -> str: + async def get_connection_string(self) -> Optional[str]: """ Get the Application Insights connection string associated with the Project's Application Insights resource. 
On first call, this method makes a GET call to the Application Insights resource URL to get the connection string. @@ -480,7 +502,7 @@ async def create_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, + toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -705,7 +727,7 @@ async def update_agent( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - toolset: Optional[_models.ToolSet] = None, + toolset: Optional[_models.AsyncToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -1310,24 +1332,6 @@ async def create_and_process_run( return run - @overload - def create_stream( - self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any - ) -> _models.AsyncAgentRunStream: - """Creates a new stream for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. - - :param thread_id: Required. - :type thread_id: str - :param body: Required. - :type body: JSON - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. - :rtype: ~azure.ai.projects.models.AsyncAgentRunStream - :raises ~azure.core.exceptions.HttpResponseError: - """ - @overload async def create_stream( self, @@ -1430,7 +1434,7 @@ async def create_stream( @overload async def create_stream( - self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + self, thread_id: str, body: Union[JSON, IO[bytes]], *, content_type: str = "application/json", **kwargs: Any ) -> _models.AsyncAgentRunStream: """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. @@ -1888,7 +1892,7 @@ async def upload_file( @overload async def upload_file( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1956,7 +1960,7 @@ async def upload_file( raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") @overload - async def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + async def upload_file_and_poll(self, *, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param body: Required. @@ -1998,7 +2002,7 @@ async def upload_file_and_poll( @overload async def upload_file_and_poll( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. 
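For context, a minimal call against the keyword-only async upload overload shown above (a sketch with a placeholder file path; assumes a project client created as in the earlier samples):

    from azure.ai.projects.models import FilePurpose

    # Upload a local file and poll until processing completes.
    file = await project_client.agents.upload_file_and_poll(
        file_path="./data/notes.md",  # placeholder path
        purpose=FilePurpose.AGENTS,
    )
    print(f"Uploaded file, ID: {file.id}")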
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 7ea2ea5cfcb9..4332a77f614f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1359,13 +1359,17 @@ class InternalConnectionProperties(_model_base.Model): :ivar auth_type: Authentication type of the connection target. Required. Known values are: "ApiKey", "AAD", and "SAS". :vartype auth_type: str or ~azure.ai.projects.models.AuthenticationType + :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", + "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". + :vartype category: str or ~azure.ai.projects.models.ConnectionType + :ivar target: The connection URL to be used for this service. Required. + :vartype target: str """ __mapping__: Dict[str, _model_base.Model] = {} auth_type: str = rest_discriminator(name="authType") """Authentication type of the connection target. Required. Known values are: \"ApiKey\", \"AAD\", - and \"SAS\".""" - # Will be generated + and \"SAS\".""" category: Union[str, "_models.ConnectionType"] = rest_field() """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" @@ -1378,77 +1382,62 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi ). - :ivar auth_type: Authentication type of the connection target. Required. Entra ID - authentication - :vartype auth_type: str or ~azure.ai.projects.models.AAD :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". :vartype category: str or ~azure.ai.projects.models.ConnectionType :ivar target: The connection URL to be used for this service. Required. :vartype target: str + :ivar auth_type: Authentication type of the connection target. Required. Entra ID + authentication + :vartype auth_type: str or ~azure.ai.projects.models.AAD """ auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Entra ID authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): """Connection properties for connections with API key authentication. - :ivar auth_type: Authentication type of the connection target. Required. API Key authentication - :vartype auth_type: str or ~azure.ai.projects.models.API_KEY :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". :vartype category: str or ~azure.ai.projects.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth :ivar target: The connection URL to be used for this service. Required. :vartype target: str + :ivar auth_type: Authentication type of the connection target. Required. 
API Key authentication + :vartype auth_type: str or ~azure.ai.projects.models.API_KEY + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsApiKeyAuth """ auth_type: Literal[AuthenticationType.API_KEY] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. API Key authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" credentials: "_models._models.CredentialsApiKeyAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" class InternalConnectionPropertiesSASAuth(InternalConnectionProperties, discriminator="SAS"): """Connection properties for connections with SAS authentication. - :ivar auth_type: Authentication type of the connection target. Required. Shared Access - Signature (SAS) authentication - :vartype auth_type: str or ~azure.ai.projects.models.SAS :ivar category: Category of the connection. Required. Known values are: "AzureOpenAI", "Serverless", "AzureBlob", "AIServices", and "CognitiveSearch". :vartype category: str or ~azure.ai.projects.models.ConnectionType - :ivar credentials: Credentials will only be present for authType=ApiKey. Required. - :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth :ivar target: The connection URL to be used for this service. Required. :vartype target: str + :ivar auth_type: Authentication type of the connection target. Required. Shared Access + Signature (SAS) authentication + :vartype auth_type: str or ~azure.ai.projects.models.SAS + :ivar credentials: Credentials will only be present for authType=ApiKey. Required. + :vartype credentials: ~azure.ai.projects.models._models.CredentialsSASAuth """ auth_type: Literal[AuthenticationType.SAS] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. Shared Access Signature (SAS) authentication""" - category: Union[str, "_models.ConnectionType"] = rest_field() - """Category of the connection. Required. Known values are: \"AzureOpenAI\", \"Serverless\", - \"AzureBlob\", \"AIServices\", and \"CognitiveSearch\".""" credentials: "_models._models.CredentialsSASAuth" = rest_field() """Credentials will only be present for authType=ApiKey. Required.""" - target: str = rest_field() - """The connection URL to be used for this service. Required.""" class ListConnectionsResponse(_model_base.Model): diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 35d32436a4c8..62eba59f6a09 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,4 +1,5 @@ # pylint: disable=too-many-lines +# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -231,7 +232,7 @@ def _refresh_token(self) -> None: connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True) self._sas_token = "" - if connection.token_credential is not None: + if connection is not None and connection.token_credential is not None: sas_credential = cast(SASTokenCredential, connection.token_credential) self._sas_token = sas_credential._sas_token self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) @@ -388,7 +389,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[Functio docstring = inspect.getdoc(func) or "" description = docstring.split("\n")[0] if docstring else "No description" - param_descs = {} + param_descriptions = {} for line in docstring.splitlines(): line = line.strip() match = param_pattern.match(line) @@ -397,13 +398,13 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[Functio param_name = groups.get("name") param_desc = groups.get("description") param_desc = param_desc.strip() if param_desc else "No description" - param_descs[param_name] = param_desc.strip() + param_descriptions[param_name] = param_desc.strip() properties = {} required = [] for param_name, param in params.items(): param_type_info = _map_type(param.annotation) - param_description = param_descs.get(param_name, "No description") + param_description = param_descriptions.get(param_name, "No description") properties[param_name] = {**param_type_info, "description": param_description} @@ -501,12 +502,12 @@ class AzureAISearchTool(Tool): def __init__(self): self.index_list = [] - def add_index(self, index: str): + def add_index(self, index: str, name: str): """ Add an index ID to the list of indices used to search. """ # TODO - self.index_list.append(IndexResource(index_connection_id=index)) + self.index_list.append(IndexResource(index_connection_id=index, index_name=name)) @property def definitions(self) -> List[ToolDefinition]: @@ -588,15 +589,6 @@ def updateConnections(self, connection_list: List[Tuple[str, str]]) -> None: """ -class FileSearchTool(Tool): - """ - A tool that searches for uploaded file information from the created vector stores. - """ - - def __init__(self, vector_store_ids: List[str] = []): - self.vector_store_ids = vector_store_ids - - class FileSearchTool(Tool): """ A tool that searches for uploaded file information from the created vector stores. @@ -844,7 +836,7 @@ def execute_tool_calls(self, tool_calls: List[Any]) -> Any: class AsyncToolSet(BaseToolSet): """ - A collection of tools that can be used by an asynchronize agent. + A collection of tools that can be used by an asynchronous agent. 
""" def validate_tool_type(self, tool: Tool) -> None: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index eac6665e9878..f9f4620a7a90 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -10,8 +10,8 @@ """ import sys, io, logging, os, time from azure.core.exceptions import ResourceNotFoundError -from io import IOBase, TextIOWrapper -from typing import List, Iterable, Union, IO, Any, Dict, Optional, overload, TYPE_CHECKING, Iterator, cast +from io import TextIOWrapper +from typing import List, Union, IO, Any, Dict, Optional, overload, Sequence, TYPE_CHECKING, Iterator, cast from pathlib import Path from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated @@ -23,8 +23,9 @@ ListConnectionsResponse, GetAppInsightsResponse, GetWorkspaceResponse, + InternalConnectionPropertiesSASAuth, ) -from .._types import AgentsApiResponseFormatOption + from ..models._patch import ConnectionProperties from ..models._enums import FilePurpose from .._vendor import FileType @@ -40,6 +41,9 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from .. import _types + from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient + from openai import AzureOpenAI + from azure.identity import get_bearer_token_provider JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() @@ -53,7 +57,7 @@ def __init__(self, outer_instance): self._outer_instance = outer_instance @distributed_trace - def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": + def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClient]": """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed in this resource. The package `azure-ai-inference` must be installed prior to calling this method. @@ -118,7 +122,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": return client @distributed_trace - def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": + def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The package `azure-ai-inference` must be installed prior to calling this method. @@ -183,7 +187,7 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": return client @distributed_trace - def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwargs) -> "AzureOpenAI": + def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs) -> "AzureOpenAI": """Get an authenticated AzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. 
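Caller-side, the `Optional[...]` return types introduced above imply a None check before use. A short, hypothetical sketch, assuming a `project_client` built as in the package samples, the client's `inference` accessor, and a chat model deployed on the default Azure AI Services connection:

```python
import os

from azure.ai.inference.models import UserMessage
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

# get_chat_completions_client is now typed as Optional, so guard before calling it.
chat_client = project_client.inference.get_chat_completions_client()
if chat_client is None:
    raise RuntimeError("No default Azure AI Services connection with a deployed chat model")

response = chat_client.complete(messages=[UserMessage(content="How many feet are in a mile?")])
print(response.choices[0].message.content)
```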
@@ -216,16 +220,21 @@ def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwa client = AzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) - elif connection.authentication_type == AuthenticationType.AAD: - logger.debug( - "[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using Entra ID authentication" - ) + elif ( + connection.authentication_type == AuthenticationType.AAD + or connection.authentication_type == AuthenticationType.SAS + ): try: from azure.identity import get_bearer_token_provider except ModuleNotFoundError as _: raise ModuleNotFoundError( "azure.identity package not installed. Please install it using 'pip install azure.identity'" ) + if connection.authentication_type == AuthenticationType.AAD: + auth = "Creating AzureOpenAI using Entra ID authentication" + else: + auth = "Creating AzureOpenAI using SAS authentication" + logger.debug(f"[InferenceOperations.get_azure_openai_client] {auth}") client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( @@ -234,15 +243,6 @@ def get_azure_openai_client(self, *, api_version: Union[str, None] = None, **kwa azure_endpoint=connection.endpoint_url, api_version=api_version, ) - elif connection.authentication_type == AuthenticationType.SAS: - logger.debug("[InferenceOperations.get_azure_openai_client] Creating AzureOpenAI using SAS authentication") - client = AzureOpenAI( - azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" - ), - azure_endpoint=connection.endpoint_url, - api_version=api_version, - ) else: raise ValueError("Unknown authentication type") @@ -254,7 +254,7 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace def get_default( self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: + ) -> Optional[ConnectionProperties]: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. @@ -283,7 +283,9 @@ def get_default( return None @distributed_trace - def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + def get( + self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any + ) -> Optional[ConnectionProperties]: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. 
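The connection getters follow the same pattern now that they return `Optional[ConnectionProperties]`. A hedged usage sketch, reusing the `project_client` assumed in the previous sketch:

```python
from azure.ai.projects.models import ConnectionType

# get_default returns None when the project has no default connection of this type.
connection = project_client.connections.get_default(
    connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True
)
if connection is None:
    raise RuntimeError("This project has no default Azure OpenAI connection")

print(connection.endpoint_url)
```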
@@ -310,8 +312,10 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: elif connection.properties.auth_type == AuthenticationType.SAS: from ..models._patch import SASTokenCredential + cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) + token_credential = SASTokenCredential( - sas_token=connection.properties.credentials.sas, + sas_token=cred_prop.credentials.sas, credential=self._config.credential, subscription_id=self._config.subscription_id, resource_group_name=self._config.resource_group_name, @@ -329,14 +333,14 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: return ConnectionProperties(connection=connection) @distributed_trace - def list(self, *, connection_type: Union[ConnectionType, None] = None, **kwargs: Any) -> Iterable[ConnectionProperties]: + def list(self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any) -> Sequence[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. If not provided, all connections are listed. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :return: A list of connection properties - :rtype: Iterable[~azure.ai.projects.models._models.ConnectionProperties] + :rtype: Sequence[~azure.ai.projects.models._models.ConnectionProperties] :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -374,13 +378,17 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" ) try: - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore except ModuleNotFoundError as _: raise ModuleNotFoundError( "OpenTelemetry OTLP exporter is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" ) trace.set_tracer_provider(TracerProvider()) - trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination))) + # get_tracer_provider returns opentelemetry.trace.TracerProvider + # however, we have opentelemetry.sdk.trace.TracerProvider, which implements + # add_span_processor method, though we need to cast it to fix type checking. + tp = cast(TracerProvider, trace.get_tracer_provider()) + tp.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination))) elif isinstance(destination, TextIOWrapper): if destination is sys.stdout: @@ -394,7 +402,11 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" ) trace.set_tracer_provider(TracerProvider()) - trace.get_tracer_provider().add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + # get_tracer_provider returns opentelemetry.trace.TracerProvider + # however, we have opentelemetry.sdk.trace.TracerProvider, which implements + # add_span_processor method, though we need to cast it to fix type checking. 
+ tp = cast(TracerProvider, trace.get_tracer_provider()) + tp.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) else: raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIOWrapper`") @@ -403,14 +415,14 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> from azure.core.settings import settings settings.tracing_implementation = "opentelemetry" - _ = settings.tracing_implementation() + settings.tracing_implementation() except ModuleNotFoundError as _: logger.warning( "Azure SDK tracing plugin is not installed. Please install it using 'pip install azure-core-tracing-opentelemetry'" ) try: - from azure.ai.inference.tracing import AIInferenceInstrumentor + from azure.ai.inference.tracing import AIInferenceInstrumentor # type: ignore instrumentor = AIInferenceInstrumentor() if not instrumentor.is_instrumented(): @@ -430,7 +442,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> logger.warning("Could not call `AIAgentsInstrumentor().instrument()` " + str(exc)) try: - from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor # type: ignore OpenAIInstrumentor().instrument() except ModuleNotFoundError as _: @@ -439,7 +451,7 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> ) try: - from opentelemetry.instrumentation.langchain import LangchainInstrumentor + from opentelemetry.instrumentation.langchain import LangchainInstrumentor # type: ignore LangchainInstrumentor().instrument() except ModuleNotFoundError as _: @@ -457,7 +469,7 @@ def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") super().__init__(*args, **kwargs) - def get_connection_string(self) -> None: + def get_connection_string(self) -> Optional[str]: """ Get the Application Insights connection string associated with the Project's Application Insights resource. On first call, this method makes a GET call to the Application Insights resource URL to get the connection string. @@ -2009,7 +2021,7 @@ def upload_file( @overload def upload_file( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], **kwargs: Any + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -2077,7 +2089,7 @@ def upload_file( raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") @overload - def upload_file_and_poll(self, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: + def upload_file_and_poll(self, body: JSON, *, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. :param body: Required. @@ -2119,7 +2131,7 @@ def upload_file_and_poll( @overload def upload_file_and_poll( - self, file_path: str, *, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any + self, *, file_path: str, purpose: Union[str, _models.FilePurpose], sleep_interval: float = 1, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. 
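With the overload changes above, `file_path` and `purpose` become keyword-only for `upload_file` and `upload_file_and_poll`. A brief sketch of the resulting call shape, matching how the package samples invoke it:

```python
from azure.ai.projects.models import FilePurpose

# file_path and purpose must now be passed as keyword arguments, not positionally.
file = project_client.agents.upload_file_and_poll(
    file_path="product_info_1.md", purpose=FilePurpose.AGENTS
)
print(f"Uploaded file, file ID: {file.id}")
```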
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 85e5b08f840f..d2557ab74a23 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -1,6 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -23,7 +21,7 @@ from azure.ai.projects.operations import AgentsOperations from azure.ai.projects.aio.operations import AgentsOperations as AsyncAgentOperations from azure.ai.projects.models import _models, AgentRunStream -from azure.ai.projects.models._enums import MessageRole, RunStepStatus +from azure.ai.projects.models._enums import MessageRole, RunStepStatus, AgentsApiResponseFormatMode from azure.ai.projects.models._models import ( MessageAttachment, MessageDeltaChunk, @@ -89,24 +87,23 @@ def __init__(self): # and have a parameter that specifies the version to use. self._impl = _AIAgentsInstrumentorPreview() - def instrument(self, enable_content_recording: bool = None) -> None: + def instrument(self, enable_content_recording: Optional[bool] = None) -> None: """ Enable trace instrumentation for AI Agents. :param enable_content_recording: Whether content recording is enabled as part - of the traces or not. Content in this context refers to chat message content - and function call tool related function names, function parameter names and - values. True will enable content recording, False will disable it. If no value - is provided, then the value read from environment variable - AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable - is not found, then the value will default to False. Please note that successive calls - to insturment will always apply the content recording value provided with the most - recent call to instrument (including applying the environment variable if no value is - provided and defaulting to false if the environment variable is not found), even if - instrument was already previously called without uninstrument being called in between - the instrument calls. - - :type enable_content_recording: bool, optional + of the traces or not. Content in this context refers to chat message content + and function call tool related function names, function parameter names and + values. True will enable content recording, False will disable it. If no value + is provided, then the value read from environment variable + AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED is used. If the environment variable + is not found, then the value will default to False. Please note that successive calls + to instrument will always apply the content recording value provided with the most + recent call to instrument (including applying the environment variable if no value is + provided and defaulting to false if the environment variable is not found), even if + instrument was already previously called without uninstrument being called in between + the instrument calls. + :type enable_content_recording: bool, optional """ self._impl.instrument(enable_content_recording) @@ -133,7 +130,7 @@ def is_content_recording_enabled(self) -> bool: """This function gets the content recording value. 
:return: A bool value indicating whether content recording is enabled. - :rtype bool + :rtype: bool """ return self._impl.is_content_recording_enabled() @@ -151,7 +148,7 @@ def _str_to_bool(self, s): return False return str(s).lower() == "true" - def instrument(self, enable_content_recording: bool = None): + def instrument(self, enable_content_recording: Optional[bool] = None): """ Enable trace instrumentation for AI Agents. @@ -409,6 +406,24 @@ def set_end_run(self, span: "AbstractSpan", run: ThreadRun) -> None: span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) + @staticmethod + def agent_api_response_to_str(response_format: Any) -> Optional[str]: + """ + Convert response_format to string. + + :param response_format: The response format. + :type response_format: ~azure.ai.projects._types.AgentsApiResponseFormatOption + :returns: string for the response_format. + :raises: Value error if response_format is not of type AgentsApiResponseFormatOption. + """ + if isinstance(response_format, str) or response_format is None: + return response_format + if isinstance(response_format, AgentsApiResponseFormatMode): + return response_format.value + if isinstance(response_format, _models.AgentsApiResponseFormat): + return response_format.type + raise ValueError(f"Unknown response format {type(response_format)}") + def start_thread_run_span( self, operation_name: OperationName, @@ -436,7 +451,7 @@ def start_thread_run_span( top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, - response_format=response_format.value if response_format else None, + response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), ) if span and span.span_instance.is_recording and instructions and additional_instructions: self._add_instructions_event( @@ -495,7 +510,7 @@ def start_create_agent_span( model=model, temperature=temperature, top_p=top_p, - response_format=response_format.value if response_format else None, + response_format=_AIAgentsInstrumentorPreview.agent_api_response_to_str(response_format), ) if span and span.span_instance.is_recording: if name: @@ -1466,7 +1481,9 @@ def _is_content_recording_enabled(self) -> bool: class _AgentEventHandlerTraceWrapper(AgentEventHandler): - def __init__(self, inner_handler: AgentEventHandler, instrumentor: _AIAgentsInstrumentorPreview, span: "AbstractSpan"): + def __init__( + self, inner_handler: AgentEventHandler, instrumentor: _AIAgentsInstrumentorPreview, span: "AbstractSpan" + ): super().__init__() self.span = span self.inner_handler = inner_handler diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index dd9010f2f644..e4683958e440 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,7 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -17,7 +14,17 @@ import sys from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import FunctionTool, CodeInterpreterTool, FileSearchTool, ToolSet, CodeInterpreterToolResource, FileSearchToolResource, ToolResources, OpenAIFile, FilePurpose +from azure.ai.projects.models import ( + FunctionTool, + CodeInterpreterTool, + FileSearchTool, + ToolSet, + CodeInterpreterToolResource, + FileSearchToolResource, + ToolResources, + OpenAIFile, + FilePurpose, +) from azure.core.pipeline.transport import RequestsTransport from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError @@ -1164,7 +1171,6 @@ def test_create_agent_with_invalid_file_search_tool_resource(self, **kwargs): == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" ) - @agentClientPreparer() @recorded_by_proxy def test_code_interpreter_and_save_file(self, **kwargs): @@ -1173,18 +1179,18 @@ def test_code_interpreter_and_save_file(self, **kwargs): # create client with self.create_client(**kwargs) as client: file: OpenAIFile = None - - with tempfile.TemporaryDirectory(delete=True) as temp_dir: - + + with tempfile.TemporaryDirectory() as temp_dir: + # create a temporary input file for upload test_file_path = os.path.join(temp_dir, "input.txt") - + with open(test_file_path, "w") as f: f.write("This is a test file") - - file = client.agents.upload_file_and_poll(file_path=test_file_path, purpose=FilePurpose.AGENTS) - - # create agent + + file = client.agents.upload_file_and_poll(file_path=test_file_path, purpose=FilePurpose.AGENTS) + + # create agent code_interpreter = CodeInterpreterTool(file_ids=[file.id]) agent = client.agents.create_agent( model="gpt-4-1106-preview", @@ -1223,11 +1229,10 @@ def test_code_interpreter_and_save_file(self, **kwargs): print(f"Last Message: {last_msg.text.value}") for file_path_annotation in messages.file_path_annotations: - file_id = file_path_annotation.file_path.file_id + file_id = file_path_annotation.file_path.file_id print(f"Image File ID: {file_path_annotation.file_path.file_id}") temp_file_path = os.path.join(temp_dir, "output.png") - client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) output_file_exist = os.path.exists(temp_file_path) - - assert output_file_exist + assert output_file_exist diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index 8be0e6de97c2..9e505e8b272f 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -105,19 +105,15 @@ def test_mock_subscription_refresh_token(self): seconds=token_duration_sec ) new_token_credential = self._get_fake_token(new_expiration) - + mock_properties = MagicMock() - mock_properties.auth_type = 'sas_token' - mock_properties.category = 'fake_category' - mock_properties.target = 'microsoft.com' - mock_properties.credentials.key='very secret key' - conn_resp = GetConnectionResponse( - id = '12334', - name = 'Fake_connection', - properties = mock_properties - ) + mock_properties.auth_type = "sas_token" + mock_properties.category = "fake_category" + mock_properties.target = "microsoft.com" + mock_properties.credentials.key = "very 
secret key" + conn_resp = GetConnectionResponse(id="12334", name="Fake_connection", properties=mock_properties) conn = ConnectionProperties(connection=conn_resp, token_credential=new_token_credential) - with patch('azure.ai.projects.operations.ConnectionsOperations.get', return_value=conn): + with patch("azure.ai.projects.operations.ConnectionsOperations.get", return_value=conn): new_token = sas_token_credential.get_token() assert new_token.expires_on == int(new_expiration.timestamp()) diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py new file mode 100644 index 000000000000..ba98f64700fb --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_ai_agents_instrumentor.py @@ -0,0 +1,31 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +import pytest +from azure.ai.projects.telemetry.agents._ai_agents_instrumentor import _AIAgentsInstrumentorPreview +from azure.ai.projects.models import AgentsApiResponseFormatMode, AgentsApiResponseFormat + + +class TestAiAgentsInstrumentor: + """Tests for AI agents instrumentor.""" + + def test_convert_api_response_format_exception(self): + """Test that the exception is raised if agent_api_response_to_str is given wrong type.""" + with pytest.raises(ValueError) as cm: + _AIAgentsInstrumentorPreview.agent_api_response_to_str(42) + assert "Unknown response format " in cm.value.args[0] + + @pytest.mark.parametrize( + "fmt,expected", + [ + (None, None), + ("neep", "neep"), + (AgentsApiResponseFormatMode.AUTO, "auto"), + (AgentsApiResponseFormat(type="test"), "test"), + ], + ) + def test_convert_api_response_format(self, fmt, expected): + """Test conversion of AgentsApiResponseFormatOption to string""" + actual = _AIAgentsInstrumentorPreview.agent_api_response_to_str(fmt) + assert actual == expected diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 325c3e32605b..26d7771f403f 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: bf15d7700a3eb48e9c5663b92837fae280923405 +commit: 89ff4b214a55d27f3dc78d3c2ddf9a9523622d5d repo: Azure/azure-rest-api-specs additionalDirectories: From 8b6e88f751c4922d37450406883f069a25eccc3b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:46:17 -0800 Subject: [PATCH 081/138] Use DefaultAzureCredential from azure.identity.aio for async samples (#38362) --- .../samples/agents/async_samples/sample_agents_basics_async.py | 2 +- .../sample_agents_basics_async_with_azure_monitor_tracing.py | 2 +- .../sample_agents_basics_async_with_console_tracing.py | 2 +- .../async_samples/sample_agents_code_interpreter_async.py | 2 +- .../agents/async_samples/sample_agents_functions_async.py | 2 +- .../async_samples/sample_agents_run_with_toolset_async.py | 2 +- .../async_samples/sample_agents_stream_eventhandler_async.py | 2 +- .../sample_agents_stream_eventhandler_with_toolset_async.py | 2 +- .../async_samples/sample_agents_stream_iteration_async.py | 2 +- .../sample_agents_vector_store_batch_file_search_async.py | 2 +- .../sample_agents_with_file_search_attachment_async.py | 2 +- .../connections/async_samples/sample_connections_async.py | 2 +- 
.../evaluations/async_samples/sample_evaluations_async.py | 2 +- ...ple_chat_completions_with_azure_ai_inference_client_async.py | 2 +- .../sample_chat_completions_with_azure_openai_client_async.py | 2 +- ...mple_text_embeddings_with_azure_ai_inference_client_async.py | 2 +- 16 files changed, 16 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py index d50fed2268df..f77fb6738625 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py @@ -24,7 +24,7 @@ import time from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py index f5dc0a072846..b205684bb8a8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -27,7 +27,7 @@ import time import sys from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.tracing.agents import AIAgentsInstrumentor from opentelemetry import trace import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py index fded860098c4..c9d08f754472 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py @@ -32,7 +32,7 @@ import time import sys from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential from opentelemetry import trace import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py index 224d8eb3a318..36e7028d1f2e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -25,7 +25,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import CodeInterpreterTool from azure.ai.projects.models import FilePurpose -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential from pathlib import Path import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py index 4459a7b9e5c8..42d5ea9e318e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py +++ 
b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py @@ -25,7 +25,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index 064d86c47138..b25e00dfd36c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -23,7 +23,7 @@ import os, asyncio from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.models import AsyncFunctionTool, AsyncToolSet, CodeInterpreterTool from user_async_functions import user_async_functions diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index d9cfe387163c..b7d1df7351e6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -32,7 +32,7 @@ ThreadRun, ) from azure.ai.projects.models._patch import AsyncAgentEventHandler -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 99afd0e762ce..0359dc6e00e8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -27,7 +27,7 @@ from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun from azure.ai.projects.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet from azure.ai.projects.aio.operations import AgentsOperations -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py index ad644d0259cb..691ace56eb56 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -25,7 +25,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import AgentStreamEvent from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git 
a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 22dc0f7ae75f..680806f68682 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -25,7 +25,7 @@ import os from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import FileSearchTool, FilePurpose -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 6dc1693dca18..048186eccaff 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -25,7 +25,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import FilePurpose from azure.ai.projects.models import FileSearchTool, MessageAttachment, ToolResources -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 83563a1d4f04..c9a41c33525c 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -28,7 +28,7 @@ import os from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import ConnectionType, AuthenticationType -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def sample_connections_async(): diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index 5e43d7b04dae..0ead6693e295 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -25,7 +25,7 @@ import os from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, ViolenceEvaluator diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py index 5fade8dfd3cd..576ac3c6612e 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py +++ 
b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_ai_inference_client_async.py @@ -24,7 +24,7 @@ import asyncio from azure.ai.projects.aio import AIProjectClient from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def sample_get_chat_completions_client_async(): diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py index 5c5253179e7a..05bf8c76ed18 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py @@ -25,7 +25,7 @@ import os import asyncio from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def sample_get_azure_openai_client_async(): diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py index 10d0baaeec44..7a2f72c559a6 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py +++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_text_embeddings_with_azure_ai_inference_client_async.py @@ -23,7 +23,7 @@ import asyncio import os from azure.ai.projects.aio import AIProjectClient -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def sample_get_embeddings_client_async(): From eadeb596eb4386276840328d7cfe194416f1f73c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 6 Nov 2024 09:59:56 -0800 Subject: [PATCH 082/138] Rename AAD to ENTRA_ID (#38363) --- .../azure/ai/projects/aio/operations/_patch.py | 10 +++++----- .../azure/ai/projects/models/_enums.py | 4 ++-- .../azure/ai/projects/models/_models.py | 2 +- .../azure/ai/projects/operations/_patch.py | 10 +++++----- .../async_samples/sample_connections_async.py | 4 ++-- .../samples/connections/sample_connections.py | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index c6fb912d69b0..2b6cd8e52db0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -113,7 +113,7 @@ async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletio from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" @@ -178,7 +178,7 @@ async def get_embeddings_client(self, 
**kwargs) -> "Optional[EmbeddingsClient]": from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" @@ -229,7 +229,7 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) elif ( - connection.authentication_type == AuthenticationType.AAD + connection.authentication_type == AuthenticationType.ENTRA_ID or connection.authentication_type == AuthenticationType.SAS ): @@ -239,7 +239,7 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** raise ModuleNotFoundError( "azure.identity package not installed. Please install it using 'pip install azure-identity'" ) - if connection.authentication_type == AuthenticationType.AAD: + if connection.authentication_type == AuthenticationType.ENTRA_ID: auth = "Creating AzureOpenAI using Entra ID authentication" else: auth = "Creating AzureOpenAI using SAS authentication" @@ -316,7 +316,7 @@ async def get( ) except ResourceNotFoundError as _: return None - if connection.properties.auth_type == AuthenticationType.AAD: + if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ...models._patch import SASTokenCredential diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index 4e9ce22d8c67..bbb0b7729939 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -137,8 +137,8 @@ class AuthenticationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): API_KEY = "ApiKey" """API Key authentication""" - AAD = "AAD" - """Entra ID authentication""" + ENTRA_ID = "AAD" + """Entra ID authentication (formerly known as AAD)""" SAS = "SAS" """Shared Access Signature (SAS) authentication""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 4332a77f614f..cca2208f8884 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -1392,7 +1392,7 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi :vartype auth_type: str or ~azure.ai.projects.models.AAD """ - auth_type: Literal[AuthenticationType.AAD] = rest_discriminator(name="authType") # type: ignore + auth_type: Literal[AuthenticationType.ENTRA_ID] = rest_discriminator(name="authType") # type: ignore """Authentication type of the connection target. Required. 
Entra ID authentication""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index f9f4620a7a90..87afff214e34 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -104,7 +104,7 @@ def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClie from azure.core.credentials import AzureKeyCredential client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" @@ -169,7 +169,7 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": from azure.core.credentials import AzureKeyCredential client = EmbeddingsClient(endpoint=endpoint, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using Entra ID authentication" @@ -221,7 +221,7 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) elif ( - connection.authentication_type == AuthenticationType.AAD + connection.authentication_type == AuthenticationType.ENTRA_ID or connection.authentication_type == AuthenticationType.SAS ): try: @@ -230,7 +230,7 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs raise ModuleNotFoundError( "azure.identity package not installed. 
Please install it using 'pip install azure.identity'" ) - if connection.authentication_type == AuthenticationType.AAD: + if connection.authentication_type == AuthenticationType.ENTRA_ID: auth = "Creating AzureOpenAI using Entra ID authentication" else: auth = "Creating AzureOpenAI using SAS authentication" @@ -307,7 +307,7 @@ def get( ) except ResourceNotFoundError as _: return None - if connection.properties.auth_type == AuthenticationType.AAD: + if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: from ..models._patch import SASTokenCredential diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index c9a41c33525c..d23d9cb3fdc5 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -86,7 +86,7 @@ async def sample_connections_async(): azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: print("====> Creating AzureOpenAI client using Entra ID authentication") from azure.identity import get_bearer_token_provider @@ -124,7 +124,7 @@ async def sample_connections_async(): client = ChatCompletionsClient( endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) ) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") client = ChatCompletionsClient( diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 626579139367..a7b9c39cbdfa 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -84,7 +84,7 @@ azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs ) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: print("====> Creating AzureOpenAI client using Entra ID authentication") client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider @@ -114,7 +114,7 @@ if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) - elif connection.authentication_type == AuthenticationType.AAD: + elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") 
 client = ChatCompletionsClient(

From cab837966f78fd4ad7fcac7e56ffa8c51ca7b357 Mon Sep 17 00:00:00 2001
From: Howie Leung
Date: Wed, 6 Nov 2024 19:22:09 -0800
Subject: [PATCH 083/138] added agents example section in readme and added tags to samples (#38351)

* added agents example section in readme and added tags to samples

* Resolved comments

* Added more detail to readme
---
 sdk/ai/azure-ai-projects/README.md            | 327 ++++++++++++++++++
 .../sample_agents_run_with_toolset_async.py   |   4 +-
 .../samples/agents/sample_agents_basics.py    |  15 +-
 .../agents/sample_agents_code_interpreter.py  |   2 +
 .../agents/sample_agents_file_search.py       |  26 +-
 .../agents/sample_agents_run_with_toolset.py  |  20 +-
 .../sample_agents_stream_eventhandler.py      |   5 +-
 ...ents_stream_eventhandler_with_functions.py |   3 +
 ...agents_stream_eventhandler_with_toolset.py |   2 +
 ...e_agents_vector_store_batch_file_search.py |   4 +-
 ...mple_agents_with_file_search_attachment.py |   2 +
 11 files changed, 388 insertions(+), 22 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md
index 3de22e548143..ebdaea691bda 100644
--- a/sdk/ai/azure-ai-projects/README.md
+++ b/sdk/ai/azure-ai-projects/README.md
@@ -39,6 +39,331 @@ Use the returned token credential to authenticate the client:
 
 ## Examples
 
+### Agents
+The following steps outline the typical sequence for interacting with agents:
+
+ - Create a project client
+ - Create an agent with a toolset, or with tools and tool resources, including:
+   - File Search with file upload indexed by vector stores
+   - Code Interpreter with file upload
+   - Function calls
+ - Create a thread
+ - Create a message with:
+   - File search attachment
+ - Execute Run, Run_and_Process, or Stream
+ - Retrieve messages
+ - Tear down by deleting resources
+
+#### Create Project Client
+
+To create a project client, use the following example:
+
+```python
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+```
+
+#### Create Agent
+
+Here is an example of creating an agent:
+
+```python
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+)
+```
+
+#### Create Agent with Toolset, or Tools and Tool Resources
+In order to use tools, you can provide a toolset. Here is an example:
+
+```python
+functions = FunctionTool(user_functions)
+code_interpreter = CodeInterpreterTool()
+
+toolset = ToolSet()
+toolset.add(functions)
+toolset.add(code_interpreter)
+
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+)
+```
+
+Alternatively, you can provide tools and tool resources. Here is an example:
+
+```python
+file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+# notice that FileSearchTool must be added as a tool, together with its tool_resources, or the assistant will be unable to search the file
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview",
+    name="my-assistant",
+    instructions="You are helpful assistant",
+    tools=file_search_tool.definitions,
+    tool_resources=file_search_tool.resources,
+)
+```
+
+#### Create Agent with File Upload in Vector Store for File Search
+To perform file search by an agent, we first need to upload a file, create a vector store, and associate the file with the vector store.
+Here is an example:
+
+```python
+file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+print(f"Uploaded file, file ID: {file.id}")
+
+vector_store = project_client.agents.create_vector_store_and_poll(
+    file_ids=[file.id], name="my_vectorstore"
+)
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+# Create file search tool with resources followed by creating agent
+file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview",
+    name="my-assistant",
+    instructions="Hello, you are helpful assistant and can search information from uploaded files",
+    tools=file_search.definitions,
+    tool_resources=file_search.resources,
+)
+```
+
+Again, you can define `toolset` instead of passing `tools` and `tool_resources`.
+
+#### Create Agent with File Upload for Code Interpreter
+Here is an example of uploading a file and using it for code interpretation by an agent:
+
+```python
+file = project_client.agents.upload_file_and_poll(
+    file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS
+)
+print(f"Uploaded file, file ID: {file.id}")
+
+code_interpreter = CodeInterpreterTool(file_ids=[file.id])
+
+# notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview",
+    name="my-assistant",
+    instructions="You are helpful assistant",
+    tools=code_interpreter.definitions,
+    tool_resources=code_interpreter.resources,
+)
+```
+
+#### Create Agent with Function Tool
+You can enhance your agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions:
+
+- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within the `Run` or `Stream` context. Agents will invoke these functions based on their definitions.
+- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler` will raise a `requires_action` status based on the function definitions. Your code must handle this status and invoke the appropriate functions.
+
+For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](samples/agents/sample_agents_functions.py).
+
+Here is an example using [user functions](samples/agents/user_function.py) in `toolset`:
+
+```python
+functions = FunctionTool(user_functions)
+toolset = ToolSet()
+toolset.add(functions)
+
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset
+)
+```
+
+For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](samples/agents/async_samples/user_async_functions.py):
+#### Create Message
+
+To create a message for the agent to process, you pass `user` as `role` and a question as `content`. Note that the message is added to a thread, created beforehand with `project_client.agents.create_thread()`:
+
+```python
+message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+```
+
+#### Create Message with File Search Attachment
+
+To attach a file to a message, use `MessageAttachment` together with `FileSearchTool`:
+
+```python
+attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions)
+message = project_client.agents.create_message(
+    thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment]
+)
+```
+
+#### Create Run, Run_and_Process, or Stream
+
+To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`.
+
+Here is an example using `create_run`:
+
+```python
+run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+
+# Poll the run as long as the run status is queued or in progress
+while run.status in ["queued", "in_progress", "requires_action"]:
+    # Wait for a second
+    time.sleep(1)
+    run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)
+```
+
+Here is an example using `create_and_process_run`:
+
+```python
+run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+```
+
+Here is an example using `create_stream`:
+
+```python
+with project_client.agents.create_stream(
+    thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
+) as stream:
+    stream.until_done()
+```
+
+The event handler is optional. Here is an example:
+
+```python
+class MyEventHandler(AgentEventHandler):
+    def on_message_delta(self, delta: "MessageDeltaChunk") -> None:
+        for content_part in delta.delta.content:
+            if isinstance(content_part, MessageDeltaTextContent):
+                text_value = content_part.text.value if content_part.text else "No text"
+                print(f"Text delta received: {text_value}")
+
+    def on_thread_message(self, message: "ThreadMessage") -> None:
+        print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}")
+
+    def on_thread_run(self, run: "ThreadRun") -> None:
+        print(f"ThreadRun status: {run.status}")
+
+    def on_run_step(self, step: "RunStep") -> None:
+        print(f"RunStep type: {step.type}, Status: {step.status}")
+
+    def on_error(self, data: str) -> None:
+        print(f"An error occurred.
Data: {data}") + + def on_done(self) -> None: + print("Stream completed.") + + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") +``` + + + + + +#### Retrieve Messages + +To retrieve messages from agents, use the following example: + + + +```python +messages = project_client.agents.list_messages(thread_id=thread.id) +``` + + + +#### Teardown + +To remove resources after completing tasks, use the following functions: + + + +```python +# Delete the file when done +project_client.agents.delete_vector_store(vector_store.id) +print("Deleted vector store") + +project_client.agents.delete_file(file_id=file.id) +print("Deleted file") + +# Delete the agent when done +project_client.agents.delete_agent(agent.id) +print("Deleted agent") +``` + +## Examples + ```python >>> from azure.ai.projects import AIProjectClient >>> from azure.identity import DefaultAzureCredential @@ -78,3 +403,5 @@ additional questions or comments. [pip]: https://pypi.org/project/pip/ [azure_sub]: https://azure.microsoft.com/free/ + + diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index b25e00dfd36c..91b38d7174c1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -43,16 +43,16 @@ async def main(): async with project_client: # Initialize agent toolset with user functions and code interpreter + # [START create_agent_with_async_function_tool] functions = AsyncFunctionTool(user_async_functions) - code_interpreter = CodeInterpreterTool() toolset = AsyncToolSet() toolset.add(functions) - toolset.add(code_interpreter) agent = await project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) + # [END create_agent_with_async_function_tool] print(f"Created agent, ID: {agent.id}") # Create thread for communication diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py index 46e638eeb828..a9194ad747a7 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -29,23 +29,33 @@ # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables +# [START create_project_client] project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) +# [END create_project_client] with project_client: + + # [START create_agent] agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) + # [END create_agent] print(f"Created agent, agent ID: {agent.id}") + # [START create_thread] thread = project_client.agents.create_thread() + # [END create_thread] print(f"Created thread, thread ID: {thread.id}") + # [START create_message] message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + # [END create_message] print(f"Created message, message ID: {message.id}") + # [START create_run] run = 
project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id) # poll the run as long as run status is queued or in progress @@ -53,11 +63,14 @@ # wait for a second time.sleep(1) run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) - + # [END create_run] print(f"Run status: {run.status}") project_client.agents.delete_agent(agent.id) print("Deleted agent") + # [START list_messages] messages = project_client.agents.list_messages(thread_id=thread.id) + # [END list_messages] + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index 4b02174acdef..3f06dd335373 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -39,6 +39,7 @@ with project_client: # upload a file and wait for it to be processed + # [START upload_file_and_creae_agent_with_code_interpreter] file = project_client.agents.upload_file_and_poll( file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS ) @@ -54,6 +55,7 @@ tools=code_interpreter.definitions, tool_resources=code_interpreter.resources, ) + # [END upload_file_and_creae_agent_with_code_interpreter] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index cc8764b7e040..c16e6f72056b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -37,18 +37,19 @@ with project_client: - openai_file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") - print(f"Uploaded file, file ID: {openai_file.id}") + # Upload file and create vector store + # [START upload_file_create_vector_store_and_agent_with_file_search_tool] + file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") - openai_vectorstore = project_client.agents.create_vector_store_and_poll( - file_ids=[openai_file.id], name="my_vectorstore" + vector_store = project_client.agents.create_vector_store_and_poll( + file_ids=[file.id], name="my_vectorstore" ) - print(f"Created vector store, vector store ID: {openai_vectorstore.id}") + print(f"Created vector store, vector store ID: {vector_store.id}") - # Create file search tool with resources - file_search = FileSearchTool(vector_store_ids=[openai_vectorstore.id]) + # Create file search tool with resources followed by creating agent + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) - # Create agent with file search tool and process assistant run agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", @@ -56,6 +57,8 @@ tools=file_search.definitions, tool_resources=file_search.resources, ) + # [END upload_file_create_vector_store_and_agent_with_file_search_tool] + print(f"Created agent, ID: {agent.id}") # Create thread for communication @@ -76,13 +79,18 @@ # Check if you got "Rate limit is exceeded.", then you want to get more quota print(f"Run failed: {run.last_error}") + # [START teardown] # Delete the file when done - project_client.agents.delete_vector_store(openai_vectorstore.id) + 
project_client.agents.delete_vector_store(vector_store.id) print("Deleted vector store") + project_client.agents.delete_file(file_id=file.id) + print("Deleted file") + # Delete the agent when done project_client.agents.delete_agent(agent.id) print("Deleted agent") + # [END teardown] # Fetch and log all messages messages = project_client.agents.list_messages(thread_id=thread.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py index f66e314abdaf..9899e4b2ad1a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py @@ -37,19 +37,21 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -# Initialize agent toolset with user functions and code interpreter -functions = FunctionTool(user_functions) -code_interpreter = CodeInterpreterTool() - -toolset = ToolSet() -toolset.add(functions) -toolset.add(code_interpreter) - # Create agent with toolset and process assistant run with project_client: + # Initialize agent toolset with user functions and code interpreter + # [START create_agent_toolset] + functions = FunctionTool(user_functions) + code_interpreter = CodeInterpreterTool() + + toolset = ToolSet() + toolset.add(functions) + toolset.add(code_interpreter) + agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) + # [END create_agent_toolset] print(f"Created agent, ID: {agent.id}") # Create thread for communication @@ -65,7 +67,9 @@ print(f"Created message, ID: {message.id}") # Create and process agent run in thread with tools + # [START create_and_process_run] run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + # [END create_and_process_run] print(f"Run finished with status: {run.status}") if run.status == "failed": diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py index e7f7706090fb..bc92f89450aa 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py @@ -46,7 +46,7 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) - +# [START stream_event_handler] class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: @@ -71,6 +71,7 @@ def on_done(self) -> None: def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") +# [END stream_event_handler] with project_client: @@ -86,10 +87,12 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") + # [START create_stream] with project_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: stream.until_done() + # [END create_stream] project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py 
b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 83d5eefef99d..551009dd4f6e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -100,6 +100,8 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: with project_client: + + # [START create_agent_with_function_tool] functions = FunctionTool(user_functions) agent = project_client.agents.create_agent( @@ -108,6 +110,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: instructions="You are a helpful assistant", tools=functions.definitions, ) + # [END create_agent_with_function_tool] print(f"Created agent, ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index fab7a393bdd3..e97952601539 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -85,6 +85,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: with project_client: + # [START create_agent_with_function_tool] functions = FunctionTool(user_functions) toolset = ToolSet() toolset.add(functions) @@ -92,6 +93,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) + # [END create_agent_with_function_tool] print(f"Created agent, ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index 89e5f6a9c177..d65ba2b6abd3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -52,6 +52,7 @@ print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") # create a file search tool + # [START create_agent_with_tools_and_tool_resources] file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file @@ -62,6 +63,7 @@ tools=file_search_tool.definitions, tool_resources=file_search_tool.resources, ) + # [END create_agent_with_tools_and_tool_resources] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() @@ -78,7 +80,7 @@ file_search_tool.remove_vector_store(vector_store.id) print(f"Removed vector store from file search, vector store ID: {vector_store.id}") - await project_client.agents.update_agent( + project_client.agents.update_agent( assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources ) print(f"Updated agent, agent ID: {agent.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index 
9710e3812f8c..711614dee81b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -56,10 +56,12 @@ # Create a message with the file search attachment # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days. + # [START create_message_with_attachment] attachment = MessageAttachment(file_id=file.id, tools=FileSearchTool().definitions) message = project_client.agents.create_message( thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?", attachments=[attachment] ) + # [END create_message_with_attachment] print(f"Created message, message ID: {message.id}") run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) From a4f0b1d46449d3bf7f775037ea040f34d411cad5 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Wed, 6 Nov 2024 19:48:42 -0800 Subject: [PATCH 084/138] Fix mypy and do not scan examples (#38375) * Changes in a package * Fix * Fix mypy * fix mypy * Fix --- .../azure/ai/projects/_patch.py | 2 +- .../azure/ai/projects/aio/_patch.py | 2 +- .../ai/projects/aio/operations/_patch.py | 7 +++--- .../azure/ai/projects/operations/_patch.py | 11 ++++----- sdk/ai/azure-ai-projects/pyproject.toml | 10 ++++++++ .../agents/sample_agents_azure_ai_search.py | 2 +- ...gents_basics_with_azure_monitor_tracing.py | 2 +- .../samples/agents/sample_agents_functions.py | 15 ++++++++---- ...ts_functions_with_azure_monitor_tracing.py | 16 ++++++++----- ...e_agents_functions_with_console_tracing.py | 15 +++++++----- ...ents_stream_eventhandler_with_functions.py | 23 +++++++++++-------- 11 files changed, 65 insertions(+), 40 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/pyproject.toml diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 05c6e449eca7..c0f3a1494b2c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -249,7 +249,7 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: ) data = Data( - path=file_path, + path=str(file_path), type=AssetTypes.URI_FILE, name=str(uuid.uuid4()), # generating random name is_anonymous=True, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 14b3f16af77f..ca13bf2cf3f9 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -252,7 +252,7 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: ) data = Data( - path=file_path, + path=str(file_path), type=AssetTypes.URI_FILE, name=str(uuid.uuid4()), # generating random name is_anonymous=True, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 2b6cd8e52db0..d609d3f91dae 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -8,7 +8,7 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ from ..._vendor import FileType -import sys, io, asyncio +import io, asyncio import logging import os import time @@ -28,6 +28,7 @@ overload, 
Sequence, TYPE_CHECKING, + TextIO ) from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated @@ -404,12 +405,12 @@ async def get_connection_string(self) -> Optional[str]: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIOWrapper, str], **kwargs) -> None: + def enable(self, *, destination: Union[TextIO, str], **kwargs) -> None: """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. :keyword destination: `sys.stdout` for tracing to console output, or a string holding the endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required. - :paramtype destination: Union[TextIOWrapper, str] + :paramtype destination: Union[TextIO, str] """ _enable_telemetry(destination=destination, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 87afff214e34..a2cf113f9920 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -11,7 +11,7 @@ import sys, io, logging, os, time from azure.core.exceptions import ResourceNotFoundError from io import TextIOWrapper -from typing import List, Union, IO, Any, Dict, Optional, overload, Sequence, TYPE_CHECKING, Iterator, cast +from typing import List, Union, IO, Any, Dict, Optional, overload, Sequence, TYPE_CHECKING, Iterator, TextIO, cast from pathlib import Path from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated @@ -357,14 +357,14 @@ def list(self, *, connection_type: Optional[ConnectionType] = None, **kwargs: An # Internal helper function to enable tracing, used by both sync and async clients -def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> None: +def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. :keyword destination: `sys.stdout` for tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) endpoint. If not provided, this method enables instrumentation, but does not configure OpenTelemetry SDK to export traces. - :paramtype destination: Union[TextIOWrapper, str, None] + :paramtype destination: Union[TextIO, str, None] """ if isinstance(destination, str): # `destination` is the OTLP endpoint @@ -415,7 +415,6 @@ def _enable_telemetry(destination: Union[TextIOWrapper, str, None], **kwargs) -> from azure.core.settings import settings settings.tracing_implementation = "opentelemetry" - settings.tracing_implementation() except ModuleNotFoundError as _: logger.warning( "Azure SDK tracing plugin is not installed. Please install it using 'pip install azure-core-tracing-opentelemetry'" @@ -497,7 +496,7 @@ def get_connection_string(self) -> Optional[str]: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIOWrapper, str, None] = None, **kwargs) -> None: + def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. 
Following instrumentations are enabled (when corresponding packages are installed): @@ -519,7 +518,7 @@ def enable(self, *, destination: Union[TextIOWrapper, str, None] = None, **kwarg endpoint such as "http://localhost:4317. If not provided, the method enables instrumentations, but does not configure OpenTelemetry SDK to export traces. - :paramtype destination: Union[TextIOWrapper, str, None] + :paramtype destination: Union[TextIO, str, None] """ _enable_telemetry(destination=destination, **kwargs) diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml new file mode 100644 index 000000000000..326df8f77fd8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -0,0 +1,10 @@ +[tool.mypy] +python_version = "3.8" +exclude = [ + "downloaded", + "samples" +] +warn_unused_configs = true +follow_imports = "skip" +ignore_missing_imports = true +follow_imports_for_stubs = false \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py index ca600f610302..01949969e038 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -47,7 +47,7 @@ # Initialize agent AI search tool and add the search index connection id ai_search = AzureAISearchTool() -ai_search.add_index(conn_id) +ai_search.add_index(conn_id, "sample_index") # Create agent with AI search tool and process assistant run with project_client: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py index f475d1c1ef93..fda3b45f3dc1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py @@ -24,7 +24,7 @@ messages, which may contain personal data. False by default. 
""" -import os, sys, time +import os, time from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from opentelemetry import trace diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py index 8f3ddd4a778e..203307a71c80 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py @@ -23,7 +23,12 @@ import os, time from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall +from azure.ai.projects.models import ( + FunctionTool, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput +) from user_functions import user_functions @@ -79,10 +84,10 @@ print(f"Executing tool call: {tool_call}") output = functions.execute(tool_call) tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index 447afbe0acd2..0c7731a7f386 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -28,8 +28,12 @@ from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall -from user_functions import user_functions +from azure.ai.projects.models import ( + FunctionTool, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput +) from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor @@ -127,10 +131,10 @@ def fetch_weather(location: str) -> str: try: output = functions.execute(tool_call) tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index b20d862ea2b4..9a8c0e34e40b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -33,8 +33,11 @@ from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import FunctionTool, SubmitToolOutputsAction, RequiredFunctionToolCall -from user_functions import user_functions +from azure.ai.projects.models import ( + FunctionTool, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput) from opentelemetry import trace @@ -128,10 +131,10 @@ def fetch_weather(location: str) -> str: try: output = functions.execute(tool_call) tool_outputs.append( - { - "tool_call_id": 
tool_call.id, - "output": output, - } + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 551009dd4f6e..1f0ba29f638d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -20,16 +20,19 @@ Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ +from typing import Any import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun -from azure.ai.projects.models import AgentEventHandler +from azure.ai.projects.models import ( + AgentEventHandler, + FunctionTool, + MessageDeltaTextContent, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput +) from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction - -from typing import Any - from user_functions import user_functions @@ -71,10 +74,10 @@ def on_thread_run(self, run: "ThreadRun") -> None: try: output = functions.execute(tool_call) tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } + ToolOutput( + tool_call_id=tool_call.id, + output=output, + ) ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") From 0fe5ad820bc08332484e54978c84512941576458 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:05:09 -0800 Subject: [PATCH 085/138] Connections APIs throw instead of returning None if a connection name/type is not found (#38383) * Make some APIs throw instead of returning None. Re-try to get connection string * Update README.md. Work in progress * Fixes --- sdk/ai/azure-ai-projects/README.md | 82 +++++++++++++--- .../azure/ai/projects/_patch.py | 2 +- .../azure/ai/projects/aio/_patch.py | 2 +- .../ai/projects/aio/operations/_patch.py | 92 +++++++++--------- .../azure/ai/projects/operations/_patch.py | 94 ++++++++++--------- .../async_samples/sample_connections_async.py | 4 +- .../samples/connections/sample_connections.py | 19 ++-- .../tests/connections/connection_test_base.py | 6 ++ .../tests/connections/test_connections.py | 31 +++--- .../connections/test_connections_async.py | 37 +++----- 10 files changed, 221 insertions(+), 148 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index ebdaea691bda..c8bb1b4d6dca 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1,5 +1,4 @@ - # Azure Ai Projects client library for Python @@ -362,21 +361,81 @@ project_client.agents.delete_agent(agent.id) print("Deleted agent") ``` -## Examples +## Troubleshooting + +### Exceptions + +Client methods that make service calls raise an [HttpResponseError](https://learn.microsoft.com/python/api/azure-core/azure.core.exceptions.httpresponseerror) exception for a non-success HTTP status code response from the service. 
The exception's `status_code` will hold the HTTP response status code (with `reason` showing the friendly name). The exception's `error.message` contains a detailed message that may be helpful in diagnosing the issue: ```python ->>> from azure.ai.projects import AIProjectClient ->>> from azure.identity import DefaultAzureCredential ->>> from azure.core.exceptions import HttpResponseError +from azure.core.exceptions import HttpResponseError ->>> client = AIProjectClient(endpoint='', credential=DefaultAzureCredential()) ->>> try: - - except HttpResponseError as e: - print('service responds error: {}'.format(e.response.json())) +... + +try: + result = client.connections.list() +except HttpResponseError as e: + print(f"Status code: {e.status_code} ({e.reason})") + print(e.message) +``` +For example, when you provide wrong credentials: + +```text +Status code: 401 (Unauthorized) +Operation returned an invalid status 'Unauthorized' ``` +### Logging + +The client uses the standard [Python logging library](https://docs.python.org/3/library/logging.html). The SDK logs HTTP request and response details, which may be useful in troubleshooting. To log to stdout, add the following: + +```python +import sys +import logging + +# Acquire the logger for this client library. Use 'azure' to affect both +# 'azure.core` and `azure.ai.inference' libraries. +logger = logging.getLogger("azure") + +# Set the desired logging level. logging.INFO or logging.DEBUG are good options. +logger.setLevel(logging.DEBUG) + +# Direct logging output to stdout: +handler = logging.StreamHandler(stream=sys.stdout) +# Or direct logging output to a file: +# handler = logging.FileHandler(filename="sample.log") +logger.addHandler(handler) + +# Optional: change the default logging format. Here we add a timestamp. +#formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(name)s:%(message)s") +#handler.setFormatter(formatter) +``` + +By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads. To create logs without redaction, add `logging_enable = True` to the client constructor: + +```python +client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=project_connection_string, + logging_enable = True +) +``` + +Note that the log level must be set to `logging.DEBUG` (see above code). Logs will be redacted with any other log level. + +Be sure to protect non redacted logs to avoid compromising security. + +For more information, see [Configure logging in the Azure libraries for Python](https://aka.ms/azsdk/python/logging) + +### Reporting issues + +To report issues with the client library, or request additional features, please open a GitHub issue [here](https://github.com/Azure/azure-sdk-for-python/issues) + +## Next steps + +Have a look at the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/ai/azure-ai-projects/samples) folder, containing fully runnable Python code for synchronous and asynchronous clients. + ## Contributing This project welcomes contributions and suggestions. Most contributions require @@ -402,6 +461,3 @@ additional questions or comments. 
[default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential [pip]: https://pypi.org/project/pip/ [azure_sub]: https://azure.microsoft.com/free/ - - - diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index c0f3a1494b2c..312367a8dec6 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -42,7 +42,7 @@ def __init__( if not project_name: raise ValueError("project_name is required") if not credential: - raise ValueError("Credential is required") + raise ValueError("credential is required") if "api_version" in kwargs: raise ValueError("No support for overriding the API version") if "credential_scopes" in kwargs: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index ca13bf2cf3f9..9a21737b6bb8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -45,7 +45,7 @@ def __init__( if not project_name: raise ValueError("project_name is required") if not credential: - raise ValueError("Credential is required") + raise ValueError("credential is required") if "api_version" in kwargs: raise ValueError("No support for overriding the API version") if "credential_scopes" in kwargs: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index d609d3f91dae..9d95d605aa9e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -53,7 +53,7 @@ from azure.ai.projects import _types from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient from openai import AsyncAzureOpenAI - from azure.identity import get_bearer_token_provider + from azure.identity.aio import get_bearer_token_provider logger = logging.getLogger(__name__) @@ -71,9 +71,15 @@ async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletio """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package + is not installed. 
:return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.ChatCompletionsClient + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -86,14 +92,10 @@ async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletio connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) - if not connection: - return None else: connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs ) - if not connection: - return None try: from azure.ai.inference.aio import ChatCompletionsClient @@ -136,9 +138,15 @@ async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package + is not installed. :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.EmbeddingsClient + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -151,14 +159,10 @@ async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) - if not connection: - return None else: connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs ) - if not connection: - return None try: from azure.ai.inference.aio import EmbeddingsClient @@ -200,6 +204,10 @@ async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": async def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs) -> "AsyncAzureOpenAI": """Get an authenticated AsyncAzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure OpenAI connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `openai` package + is not installed. :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. 
See "Data plane - Inference" row in the table at @@ -208,14 +216,14 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** :paramtype api_version: str :return: An authenticated AsyncAzureOpenAI client :rtype: ~openai.AsyncAzureOpenAI + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) connection = await self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs ) - if not connection: - raise ValueError("No Azure OpenAI connection found.") try: from openai import AsyncAzureOpenAI @@ -235,7 +243,7 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** ): try: - from azure.identity import get_bearer_token_provider + from azure.identity.aio import get_bearer_token_provider except ModuleNotFoundError as _: raise ModuleNotFoundError( "azure.identity package not installed. Please install it using 'pip install azure-identity'" @@ -264,16 +272,18 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace_async async def get_default( self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any - ) -> Optional[ConnectionProperties]: + ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. + populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError + exception if a connection with the given name was not found. :param connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. - :rtype: ~azure.ai.projects.models._models.ConnectionProperties + :rtype: ~azure.ai.projects.model.ConnectionProperties + :raises ~azure.core.exceptions.ResourceNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -289,34 +299,32 @@ async def get_default( ) else: return connection_properties_list[0] - else: - return None + raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace_async async def get( self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any - ) -> Optional[ConnectionProperties]: + ) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. + populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError + exception if a connection with the given name was not found. :param connection_name: Connection Name. Required. :type connection_name: str :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. 
- :rtype: ~azure.ai.projects.models._models.ConnectionProperties + :rtype: ~azure.ai.projects.models.ConnectionProperties + :raises ~azure.core.exceptions.ResourceNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Endpoint name cannot be empty") if with_credentials: - try: - connection: GetConnectionResponse = await self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - except ResourceNotFoundError as _: - return None + connection: GetConnectionResponse = await self._get_connection_with_secrets( + connection_name=connection_name, ignored="ignore", **kwargs + ) if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: @@ -336,10 +344,7 @@ async def get( return ConnectionProperties(connection=connection) else: - try: - connection = await self._get_connection(connection_name=connection_name, **kwargs) - except ResourceNotFoundError as _: - return None + connection = await self._get_connection(connection_name=connection_name, **kwargs) return ConnectionProperties(connection=connection) @distributed_trace_async @@ -371,7 +376,6 @@ async def list( class TelemetryOperations(TelemetryOperationsGenerated): _connection_string: Optional[str] = None - _get_connection_string_called: bool = False def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") @@ -380,27 +384,29 @@ def __init__(self, *args, **kwargs): async def get_connection_string(self) -> Optional[str]: """ Get the Application Insights connection string associated with the Project's Application Insights resource. - On first call, this method makes a GET call to the Application Insights resource URL to get the connection string. - Subsequent calls return the cached connection string. + On first call, this method makes a service call to the Application Insights resource URL to get the connection string. + Subsequent calls return the cached connection string, if one exists. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Application Insights resource was not + enabled for this project. - :return: The connection string, or `None` if an Application Insights resource was not enabled for the Project. + :return: The Application Insights connection string if a the resource was enabled for the Project. 
:rtype: str + :raises ~azure.core.exceptions.ResourceNotFoundError """ - if not self._get_connection_string_called: + if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace() - # Continue only if Application Insights resource was enabled for this Project - if get_workspace_response.properties.application_insights: + if not get_workspace_response.properties.application_insights: + raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") - # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = await self._get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) + # Make a GET call to the Application Insights resource URL to get the connection string + app_insights_respose: GetAppInsightsResponse = await self._get_app_insights( + app_insights_resource_url=get_workspace_response.properties.application_insights + ) - self._connection_string = app_insights_respose.properties.connection_string + self._connection_string = app_insights_respose.properties.connection_string - self._get_connection_string_called = True return self._connection_string # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index a2cf113f9920..dd5b51dd8c30 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -57,13 +57,19 @@ def __init__(self, outer_instance): self._outer_instance = outer_instance @distributed_trace - def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClient]": + def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": """Get an authenticated ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed in this resource. The package `azure-ai-inference` must be installed prior to calling this method. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package + is not installed. :return: An authenticated chat completions client, or `None` if no Azure AI Services connection is found. 
:rtype: ~azure.ai.inference.models.ChatCompletionsClient + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -76,14 +82,10 @@ def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClie connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) - if not connection: - return None else: connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs ) - if not connection: - return None try: from azure.ai.inference import ChatCompletionsClient @@ -126,9 +128,15 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The package `azure-ai-inference` must be installed prior to calling this method. - + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package + is not installed. + :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.EmbeddingsClient + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -141,14 +149,10 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs ) - if not connection: - return None else: connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs ) - if not connection: - return None try: from azure.ai.inference import EmbeddingsClient @@ -190,6 +194,10 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs) -> "AzureOpenAI": """Get an authenticated AzureOpenAI client (from the `openai` package) for the default Azure OpenAI connection. The package `openai` must be installed prior to calling this method. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure OpenAI connection + does not exist. + Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `openai` package + is not installed. :keyword api_version: The Azure OpenAI api-version to use when creating the client. Optional. 
See "Data plane - Inference" row in the table at @@ -198,6 +206,8 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs :paramtype api_version: str :return: An authenticated AzureOpenAI client :rtype: ~openai.AzureOpenAI + :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ModuleNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ @@ -205,8 +215,6 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs connection = self._outer_instance.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs ) - if not connection: - raise ValueError("No Azure OpenAI connection found") try: from openai import AzureOpenAI @@ -254,16 +262,18 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace def get_default( self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any - ) -> Optional[ConnectionProperties]: + ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without - populating authentication credentials. + populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError + exception if a connection with the given name was not found. :param connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. - :rtype: ~azure.ai.projects.models._models.ConnectionProperties + :rtype: ~azure.ai.projects.models.ConnectionProperties + :raises ~azure.core.exceptions.ResourceNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -279,34 +289,32 @@ def get_default( ) else: return connection_properties_list[0] - else: - return None + raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace def get( self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any - ) -> Optional[ConnectionProperties]: + ) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without - populating authentication credentials. + populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError + exception if a connection with the given name was not found. :param connection_name: Connection Name. Required. :type connection_name: str :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. :type with_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. 
- :rtype: ~azure.ai.projects.models._models.ConnectionProperties + :rtype: ~azure.ai.projects.models.ConnectionProperties + :raises ~azure.core.exceptions.ResourceNotFoundError: :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Connection name cannot be empty") if with_credentials: - try: - connection: GetConnectionResponse = self._get_connection_with_secrets( - connection_name=connection_name, ignored="ignore", **kwargs - ) - except ResourceNotFoundError as _: - return None + connection: GetConnectionResponse = self._get_connection_with_secrets( + connection_name=connection_name, ignored="ignore", **kwargs + ) if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) elif connection.properties.auth_type == AuthenticationType.SAS: @@ -326,10 +334,7 @@ def get( return ConnectionProperties(connection=connection) else: - try: - connection = self._get_connection(connection_name=connection_name, **kwargs) - except ResourceNotFoundError as _: - return None + connection = self._get_connection(connection_name=connection_name, **kwargs) return ConnectionProperties(connection=connection) @distributed_trace @@ -462,36 +467,37 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: class TelemetryOperations(TelemetryOperationsGenerated): _connection_string: Optional[str] = None - _get_connection_string_called: bool = False def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") super().__init__(*args, **kwargs) - def get_connection_string(self) -> Optional[str]: + def get_connection_string(self) -> str: """ Get the Application Insights connection string associated with the Project's Application Insights resource. - On first call, this method makes a GET call to the Application Insights resource URL to get the connection string. - Subsequent calls return the cached connection string. + On first call, this method makes a service call to the Application Insights resource URL to get the connection string. + Subsequent calls return the cached connection string, if one exists. + Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Application Insights resource was not + enabled for this project. - :return: The connection string, or `None` if an Application Insights resource was not enabled for the Project. + :return: The Application Insights connection string if a the resource was enabled for the Project. 
:rtype: str + :raises ~azure.core.exceptions.ResourceNotFoundError """ - if not self._get_connection_string_called: + if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace() - # Continue only if Application Insights resource was enabled for this Project - if get_workspace_response.properties.application_insights: + if not get_workspace_response.properties.application_insights: + raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") - # Make a GET call to the Application Insights resource URL to get the connection string - app_insights_respose: GetAppInsightsResponse = self._get_app_insights( - app_insights_resource_url=get_workspace_response.properties.application_insights - ) + # Make a GET call to the Application Insights resource URL to get the connection string + app_insights_respose: GetAppInsightsResponse = self._get_app_insights( + app_insights_resource_url=get_workspace_response.properties.application_insights + ) - self._connection_string = app_insights_respose.properties.connection_string + self._connection_string = app_insights_respose.properties.connection_string - self._get_connection_string_called = True return self._connection_string # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index d23d9cb3fdc5..fb7ee382356e 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure-ai-projects aiohttp azure-identity + pip install azure-ai-projects azure-identity azure-ai-inference openai aiohttp Set these environment variables with your own values: 1) PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in the "Project overview" @@ -88,7 +88,7 @@ async def sample_connections_async(): ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: print("====> Creating AzureOpenAI client using Entra ID authentication") - from azure.identity import get_bearer_token_provider + from azure.identity.aio import get_bearer_token_provider client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index a7b9c39cbdfa..64a5aecae3e1 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure-ai-projects azure-identity + pip install azure-ai-projects azure-identity azure-ai-inference openai Set these environment variables with your own values: 1) PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in the "Project overview" @@ -27,11 +27,9 @@ import os from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ConnectionType, AuthenticationType -from openai import AzureOpenAI -from azure.ai.inference 
import ChatCompletionsClient -from azure.ai.inference.models import UserMessage -from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from azure.core.credentials import AzureKeyCredential +from azure.identity import DefaultAzureCredential + +#from azure.identity import DefaultAzureCredential, get_bearer_token_provider project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] connection_name = os.environ["CONNECTION_NAME"] @@ -77,6 +75,8 @@ # Examples of how you would create Inference client if connection.connection_type == ConnectionType.AZURE_OPEN_AI: + from openai import AzureOpenAI + if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") client = AzureOpenAI( @@ -86,6 +86,8 @@ ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: print("====> Creating AzureOpenAI client using Entra ID authentication") + from azure.identity import get_bearer_token_provider + client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( @@ -111,8 +113,13 @@ elif connection.connection_type == ConnectionType.SERVERLESS: + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import UserMessage + if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") + from azure.core.credentials import AzureKeyCredential + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index 6072048658da..e9b9202d0053 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -38,6 +38,12 @@ class ConnectionsTestBase(AzureRecordedTestCase): + NON_EXISTING_CONNECTION_NAME = "non-existing-connection-name" + EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME = f"Connection {NON_EXISTING_CONNECTION_NAME} can't be found in this workspace" + + NON_EXISTING_CONNECTION_TYPE = "non-existing-connection-type" + EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE = f"No connection of type {NON_EXISTING_CONNECTION_TYPE} found" + def get_sync_client(self, **kwargs) -> AIProjectClient: conn_str = kwargs.pop("azure_ai_projects_connections_tests_project_connection_string") project_client = AIProjectClient.from_connection_string( diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index b675602308e0..ebfd4c9db70e 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -6,6 +6,7 @@ from devtools_testutils import recorded_by_proxy from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests from azure.ai.projects.models import ConnectionType +from azure.core.exceptions import ResourceNotFoundError # The test class name needs to start with "Test" to get collected by pytest @@ -20,13 +21,13 @@ def test_connections_get(self, 
**kwargs): with self.get_sync_client(**kwargs) as project_client: - assert ( - project_client.connections.get(connection_name="Some non-existing name", with_credentials=False) == None - ) - - assert ( - project_client.connections.get(connection_name="Some non-existing name", with_credentials=True) == None - ) + for with_credentials in [True, False]: + try: + connection_properties = project_client.connections.get(connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials) + assert False + except ResourceNotFoundError as e: + print(e) + assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) @@ -75,15 +76,13 @@ def test_connections_get_default(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - assert ( - project_client.connections.get_default(connection_type="Some unrecognized type", with_credentials=False) - == None - ) - - assert ( - project_client.connections.get_default(connection_type="Some unrecognized type", with_credentials=True) - == None - ) + for with_credentials in [True, False]: + try: + connection_properties = project_client.connections.get_default(connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials) + assert False + except ResourceNotFoundError as e: + print(e) + assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message connection = project_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 4b6699196e81..8688fc2df694 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -6,6 +6,7 @@ from devtools_testutils.aio import recorded_by_proxy_async from connection_test_base import ConnectionsTestBase, servicePreparerConnectionsTests from azure.ai.projects.models import ConnectionType +from azure.core.exceptions import ResourceNotFoundError # The test class name needs to start with "Test" to get collected by pytest @@ -19,15 +20,13 @@ async def test_connections_get_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - assert ( - await project_client.connections.get(connection_name="Some non-existing name", with_credentials=False) - == None - ) - - assert ( - await project_client.connections.get(connection_name="Some non-existing name", with_credentials=True) - == None - ) + for with_credentials in [True, False]: + try: + connection_properties = await project_client.connections.get(connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials) + assert False + except ResourceNotFoundError as e: + print(e) + assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=False) print(connection) @@ -80,19 +79,13 @@ async def test_connections_get_default_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - assert ( - await project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=False - ) 
- == None - ) - - assert ( - await project_client.connections.get_default( - connection_type="Some unrecognized type", with_credentials=True - ) - == None - ) + for with_credentials in [True, False]: + try: + connection_properties = await project_client.connections.get_default(connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials) + assert False + except ResourceNotFoundError as e: + print(e) + assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message connection = await project_client.connections.get_default( connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False From 2c28298e396df51b1694ff7791d2c16dd5b9c656 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Thu, 7 Nov 2024 08:18:19 -0800 Subject: [PATCH 086/138] Add code interpreter file attachment sample and link to readme. Add alot more detail to readme also. (#38384) --- sdk/ai/azure-ai-projects/README.md | 68 +++++++++-- ...gents_with_file_search_attachment_async.py | 2 +- .../agents/sample_agents_code_interpreter.py | 2 +- ...s_with_code_interpreter_file_attachment.py | 107 ++++++++++++++++++ ...mple_agents_with_file_search_attachment.py | 2 +- 5 files changed, 166 insertions(+), 15 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index c8bb1b4d6dca..16bfff8616c0 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -49,6 +49,7 @@ The following steps outline the typical sequence for interacting with agents: - Create a thread - Create a message with: - File search attachment + - Code interpreter attachment - Execute Run, Run_and_Process, or Stream - Retrieve messages - Tear down by deleting resources @@ -56,8 +57,15 @@ The following steps outline the typical sequence for interacting with agents: #### Create Project Client -To create a project client, use the following example: +When you create an project client, you need to make the decision to use synchronized or asynchronized client. Use either: +```python +from azure.ai.projects import AIProjectClient +# OR +from azure.ai.projects.aio import AIProjectClient +``` + +Here is an example of creating a project client: ```python @@ -165,7 +173,7 @@ print(f"Uploaded file, file ID: {file.id}") code_interpreter = CodeInterpreterTool(file_ids=[file.id]) -# notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment +# create agent with code interpreter tool and tools_resources agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", @@ -180,8 +188,8 @@ agent = project_client.agents.create_agent( #### Create Agent with Function Tool You can enhance your agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: -- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within the `Run` or `Stream` context. Agents will invoke these functions based on their definitions. 
-- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler` will raise a `requires_action` status based on the function definitions. Your code must handle this status and invoke the appropriate functions. +- `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. +- `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](samples/agents/sample_agents_functions.py). @@ -228,16 +236,13 @@ To create a message for assistant to process, you pass `user` as `role` and a qu ```python - - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" - ) +message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") ``` #### Create Message with File Search Attachment -To attach a file to a message, you need to `MessageAttachment` and `FileSearchTool`: +To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`: @@ -250,11 +255,46 @@ message = project_client.agents.create_message( +#### Create Message with Code Interpreter File Attachment +To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter. Here is an example: + + + +```python +# notice that CodeInterpreter must be enabled in the agent creation, +# otherwise the agent will not be able to see the file attachment for code interpretation +agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=CodeInterpreterTool().definitions, +) +print(f"Created agent, agent ID: {agent.id}") + +thread = project_client.agents.create_thread() +print(f"Created thread, thread ID: {thread.id}") + +# create an attachment +attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + +# create a message +message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment] +) +``` + + + #### Create Run, Run_and_Process, or Stream To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. -Here is an example using `create_run`: +`create_run` requests the agent to process the message without polling for the result. 
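A sketch of the `requires_action` handling this paragraph describes (assuming `project_client`, `thread`, and `agent` already exist; `my_function_dispatcher` is a hypothetical helper, not part of the SDK):

```python
import json
import time

from azure.ai.projects.models import SubmitToolOutputsAction, ToolOutput


def my_function_dispatcher(tool_call) -> str:
    # Hypothetical: route the requested function name to your own implementation.
    args = json.loads(tool_call.function.arguments)
    if tool_call.function.name == "fetch_weather":
        return f"Sunny in {args.get('location', 'unknown')}"
    return "Unknown function"


run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)

while run.status in ["queued", "in_progress", "requires_action"]:
    time.sleep(1)
    run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)

    if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
        tool_outputs = []
        for tool_call in run.required_action.submit_tool_outputs.tool_calls:
            output = my_function_dispatcher(tool_call)
            tool_outputs.append(ToolOutput(tool_call_id=tool_call.id, output=output))
        # Return the function results so the run can continue
        project_client.agents.submit_tool_outputs_to_run(
            thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
        )

print(f"Run finished with status: {run.status}")
```
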
If you are using `function tools`, regardless of whether they are provided as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](samples/agents/sample_agents_functions.py).
+
+Here is an example of `create_run` with polling until the run completes:
 
 ```python
@@ -270,7 +310,9 @@ while run.status in ["queued", "in_progress", "requires_action"]:
 
-Here is an example using `create_and_process_run`:
+To have the SDK poll on your behalf and call `function tools`, use the `create_and_process_run` method. Note that `function tools` will only be invoked if they are provided as `toolset` during the `create_agent` call.
+
+Here is an example:
 
@@ -280,7 +322,9 @@ run = project_client.agents.create_and_process_run(thread_id=thread.id, assistan
 
-Here is an example using `create_stream`:
+With streaming, there is no need to poll for the result. If `function tools` are provided as `toolset` during the `create_agent` call, they will be invoked by the SDK.
+
+Here is an example:
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
index 048186eccaff..3eda61dd302a 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py
@@ -45,7 +45,7 @@ async def main():
         file_path="../product_info_1.md", purpose=FilePurpose.AGENTS
     )
 
-    # Create agent with file search tool
+    # Create agent
     agent = await project_client.agents.create_agent(
         model="gpt-4-1106-preview",
         name="my-assistant",
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py
index 3f06dd335373..d48705d905b0 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py
@@ -47,7 +47,7 @@
 
     code_interpreter = CodeInterpreterTool(file_ids=[file.id])
 
-    # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment
+    # create agent with code interpreter tool and tools_resources
     agent = project_client.agents.create_agent(
         model="gpt-4-1106-preview",
         name="my-assistant",
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py
new file mode 100644
index 000000000000..de4e8827d1a6
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py
@@ -0,0 +1,107 @@
+# ------------------------------------
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_with_code_interpreter_file_attachment.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter through file attachment from
+    the Azure Agents service using a synchronous client.
+ +USAGE: + python sample_agents_with_code_interpreter_file_attachment.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import CodeInterpreterTool, MessageAttachment +from azure.ai.projects.models import FilePurpose +from azure.identity import DefaultAzureCredential +from pathlib import Path + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # upload a file and wait for it to be processed + file = project_client.agents.upload_file_and_poll( + file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS + ) + print(f"Uploaded file, file ID: {file.id}") + + # [START create_agent_and_message_with_code_interpreter_file_attachment] + # notice that CodeInterpreter must be enabled in the agent creation, + # otherwise the agent will not be able to see the file attachment for code interpretation + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=CodeInterpreterTool().definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # create an attachment + attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) + + # create a message + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", + attachments=[attachment] + ) + # [END create_agent_and_message_with_code_interpreter_file_attachment] + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + project_client.agents.delete_file(file.id) + print("Deleted file") + + messages = project_client.agents.get_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + last_msg = messages.get_last_text_message_by_sender("assistant") + if last_msg: + print(f"Last Message: {last_msg.text.value}") + + for image_content in messages.image_contents: + print(f"Image File ID: {image_content.image_file.file_id}") + file_name = f"{image_content.image_file.file_id}_image_file.png" + project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + print(f"Saved image file to: {Path.cwd() / file_name}") + + for file_path_annotation in messages.file_path_annotations: + print(f"File Paths:") + print(f"Type: {file_path_annotation.type}") + print(f"Text: {file_path_annotation.text}") + print(f"File ID: {file_path_annotation.file_path.file_id}") + print(f"Start Index: 
{file_path_annotation.start_index}") + print(f"End Index: {file_path_annotation.end_index}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index 711614dee81b..2a01e1f1836f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -43,7 +43,7 @@ file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS) print(f"Uploaded file, file ID: {file.id}") - # Create agent with file search tool + # Create agent agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", From f86795449112fd5bd2dd9edabc3ecc9d7502787c Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 7 Nov 2024 12:45:43 -0800 Subject: [PATCH 087/138] Fix broken async .telemetry.enable() method, when you don't pass in "destination" (#38399) * Fix README. Fix Optional[str] * Fix async telemetry --- sdk/ai/azure-ai-projects/README.md | 2 +- .../ai/projects/aio/operations/_patch.py | 29 +++++++++++++++---- .../azure/ai/projects/operations/_patch.py | 2 +- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 16bfff8616c0..423dc4afd249 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1,5 +1,5 @@ -# Azure Ai Projects client library for Python +# Azure AI Projects client library for Python ## Getting started diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 9d95d605aa9e..6dcdd0515bc0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -381,7 +381,7 @@ def __init__(self, *args, **kwargs): self._outer_instance = kwargs.pop("outer_instance") super().__init__(*args, **kwargs) - async def get_connection_string(self) -> Optional[str]: + async def get_connection_string(self) -> str: """ Get the Application Insights connection string associated with the Project's Application Insights resource. On first call, this method makes a service call to the Application Insights resource URL to get the connection string. @@ -411,12 +411,29 @@ async def get_connection_string(self) -> Optional[str]: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - def enable(self, *, destination: Union[TextIO, str], **kwargs) -> None: - """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) collector. + async def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: + """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. - :keyword destination: `sys.stdout` for tracing to console output, or a string holding the - endpoint URL of the OpenTelemetry Protocol (OTLP) collector. Required. 
- :paramtype destination: Union[TextIO, str] + Following instrumentations are enabled (when corresponding packages are installed): + + - Azure AI Inference (`azure-ai-inference`) + - Azure AI Projects (`azure-ai-projects`) + - OpenAI (`opentelemetry-instrumentation-openai-v2`) + - Langchain (`opentelemetry-instrumentation-langchain`) + + The recording of prompt and completion messages is disabled by default. To enable it, set the + `AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED` environment variable to `true`. + + When destination is provided, the method configures OpenTelemetry SDK to export traces to + stdout or OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local + development only. For production use, make sure to configure OpenTelemetry SDK directly. + + :keyword destination: Recommended for local testing only. Set it to `sys.stdout` for + tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) + endpoint such as "http://localhost:4317. + If not provided, the method enables instrumentations, but does not configure OpenTelemetry + SDK to export traces. + :paramtype destination: Union[TextIO, str, None] """ _enable_telemetry(destination=destination, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index dd5b51dd8c30..1e3769ea5efc 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -413,7 +413,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: tp = cast(TracerProvider, trace.get_tracer_provider()) tp.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) else: - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIOWrapper`") + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") # Silently try to load a set of relevant Instrumentors try: From 50f8aa1abefdd3d13f9d0f381267435086bb5e2b Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Thu, 7 Nov 2024 13:13:11 -0800 Subject: [PATCH 088/138] Sample for thread with resource and document it in readme (#38393) --- sdk/ai/azure-ai-projects/README.md | 73 +++++++++++++- .../sample_agents_with_resources_in_thread.py | 98 +++++++++++++++++++ 2 files changed, 168 insertions(+), 3 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 423dc4afd249..21cc8ba3028c 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -46,7 +46,8 @@ The following steps outline the typical sequence for interacting with agents: - File Search with file upload indexed by vector stores - Code Interpreter with file upload - Function calls - - Create a thread + - Create a thread with + - Tool resource - Create a message with: - File search attachment - Code interpreter attachment @@ -77,9 +78,32 @@ project_client = AIProjectClient.from_connection_string( +Because the client is under resource and context manager, you are required to use `with` or `async with` to consume the client object: + +```python +# For synchronize +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant" + ) + +# For asynchronize +async with project_client: + agent = 
project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are helpful assistant"
+    )
+
+```
+
+In the sections below, we will only provide code snips in synchronized functions
 
 #### Create Agent
 
-Here is an example of create an agent:
+Now you should have your project client. From the project client, you create an agent to serve your end users. Here is an example of creating an agent:
 
 ```python
@@ -229,6 +253,47 @@ agent = await project_client.agents.create_agent(
 
+#### Create Thread
+
+For each session or conversation, a thread is required. Here is an example:
+
+
+
+```python
+
+    thread = project_client.agents.create_thread()
+```
+
+
+
+#### Create Thread with Tool Resource
+
+In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector storre and upload a file, enable an agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument.
+
+
+
+
+```python
+file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants")
+print(f"Uploaded file, file ID: {file.id}")
+
+vector_store = project_client.agents.create_vector_store_and_poll(
+    file_ids=[file.id], name="my_vectorstore"
+)
+print(f"Created vector store, vector store ID: {vector_store.id}")
+
+# Create file search tool with resources followed by creating agent
+file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+
+agent = project_client.agents.create_agent(
+    model="gpt-4-1106-preview",
+    name="my-assistant",
+    instructions="Hello, you are helpful assistant and can search information from uploaded files",
+    tools=file_search.definitions,
+)
+```
+
 #### Create Message
 
 To create a message for assistant to process, you pass `user` as `role` and a question as `content`:
@@ -256,7 +321,9 @@ message = project_client.agents.create_message(
 
 #### Create Message with Code Interpreter File Attachment
 
-To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter. Here is an example:
+To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter.
+
+Here is an example to pass `CodeInterpreterTool` as a tool:
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py
new file mode 100644
index 000000000000..d96a05128cc3
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py
@@ -0,0 +1,98 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_with_resources_in_thread.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with file searching from
+    the Azure Agents service using a synchronous client. The file is attached to the thread.
+ +USAGE: + python sample_agents_with_resources_in_thread.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models._patch import FileSearchTool +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # Upload file and create vector store + # [START create_agent_and_thread_for_file_search] + file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") + print(f"Uploaded file, file ID: {file.id}") + + vector_store = project_client.agents.create_vector_store_and_poll( + file_ids=[file.id], name="my_vectorstore" + ) + print(f"Created vector store, vector store ID: {vector_store.id}") + + # Create file search tool with resources followed by creating agent + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + ) + # [END upload_file_create_vector_store_and_agent_with_file_search_tool] + + print(f"Created agent, ID: {agent.id}") + + # Create thread with file resources. + # If the agent has multiple threads, only this thread can search this file. + thread = project_client.agents.create_thread(tool_resources=file_search.resources) + # [END create_agent_and_thread_for_file_search] + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, what Contoso products do you know?" 
+ ) + print(f"Created message, ID: {message.id}") + + # Create and process assistant run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + # [START teardown] + # Delete the file when done + project_client.agents.delete_vector_store(vector_store.id) + print("Deleted vector store") + + project_client.agents.delete_file(file_id=file.id) + print("Deleted file") + + # Delete the agent when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + # [END teardown] + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") From e8e86776e227f24b25edc07ccd9adbb4687ea1b6 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:11:47 -0800 Subject: [PATCH 089/138] Users/singankit/evaluation test (#38402) * Test file * Adding fixture * Adding evaluation e2e test * Update azure_ai_projects_tests.env --- .../azure_ai_projects_tests.env | 9 ++- sdk/ai/azure-ai-projects/tests/conftest.py | 66 ++++++++++++++++- .../evaluation/data/evaluate_test_data.jsonl | 3 + .../tests/evaluation/evaluation_test_base.py | 46 ++++++++++++ .../tests/evaluation/test_evaluation.py | 73 +++++++++++++++++++ 5 files changed, 192 insertions(+), 5 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/tests/evaluation/data/evaluate_test_data.jsonl create mode 100644 sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py create mode 100644 sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env index b26cf7bd11b0..d000ead3a824 100644 --- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env @@ -50,7 +50,10 @@ AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_ ######################################################################################################################## -# Telemetry tests +# Evaluation tests # - - +AZURE_AI_PROJECTS_EVALUATIONS_TESTS_PROJECT_CONNECTION_STRING= +AZURE_AI_PROJECTS_EVALUATIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME= +AZURE_AI_PROJECTS_EVALUATIONS_TESTS_DEPLOYMENT_NAME= +AZURE_AI_PROJECTS_EVALUATIONS_TESTS_API_VERSION= +AZURE_AI_PROJECTS_EVALUATIONS_TESTS_DATASET_ID= diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index ab514fc08f68..03ba48a9ca49 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -2,14 +2,44 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ +import os import pytest -from devtools_testutils import remove_batch_sanitizers +from devtools_testutils import remove_batch_sanitizers, get_credential, test_proxy, add_general_regex_sanitizer from dotenv import load_dotenv, find_dotenv +from azure.ai.projects import AIProjectClient if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") +class SanitizedValues: + SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" + RESOURCE_GROUP_NAME = "00000" + WORKSPACE_NAME = "00000" + CONNECTION_NAME = "00000" + DATASET_NAME = "00000" + TENANT_ID = "00000000-0000-0000-0000-000000000000" + USER_OBJECT_ID = "00000000-0000-0000-0000-000000000000" + +@pytest.fixture(scope="session") +def mock_project_scope(): + return { + "subscription_id": f"{SanitizedValues.SUBSCRIPTION_ID}", + "resource_group_name": f"{SanitizedValues.RESOURCE_GROUP_NAME}", + "project_name": f"{SanitizedValues.WORKSPACE_NAME}", + } + +@pytest.fixture(scope="session") +def mock_dataset_name(): + return { + "dataset_name": f"{SanitizedValues.DATASET_NAME}", + } + +@pytest.fixture(scope="session") +def mock_connection_name(): + return { + "connection_name": f"{SanitizedValues.CONNECTION_NAME}", + } # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) @@ -18,7 +48,39 @@ def start_proxy(test_proxy): @pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy): +def add_sanitizers(test_proxy, mock_project_scope, mock_dataset_name, mock_connection_name): # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: # - AZSDK3493: $..name + + def azure_workspace_triad_sanitizer(): + """Sanitize subscription, resource group, and workspace.""" + add_general_regex_sanitizer( + regex=r"/subscriptions/([-\w\._\(\)]+)", + value=mock_project_scope["subscription_id"], + group_for_replace="1", + ) + add_general_regex_sanitizer( + regex=r"/resource[gG]roups/([-\w\._\(\)]+)", + value=mock_project_scope["resource_group_name"], + group_for_replace="1", + ) + add_general_regex_sanitizer( + regex=r"/workspaces/([-\w\._\(\)]+)", value=mock_project_scope["project_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/connections/([-\w\._\(\)]+)", value=mock_connection_name["connection_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1" + ) + + add_general_regex_sanitizer( + regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1" + ) + + azure_workspace_triad_sanitizer() + remove_batch_sanitizers(["AZSDK3493"]) + diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/data/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/tests/evaluation/data/evaluate_test_data.jsonl new file mode 100644 index 000000000000..ddca8949914d --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/evaluation/data/evaluate_test_data.jsonl @@ -0,0 +1,3 @@ +{"query":"How do you create a run?","context":"AML API only","response":"To create a run using the Azure Machine Learning API, you first need to create an Experiment. Once you have an experiment, you can create a Run object that is associated with that experiment. 
Here is some Python code that demonstrates this process:\n\n```\nfrom azureml.core import Experiment, Run\nfrom azureml.core.workspace import Workspace\n\n# Define workspace and experiment\nws = Workspace.from_config()\nexp = Experiment(workspace=ws, name='my_experiment')\n\n# Create a new run\nrun = exp.start_logging()\n```\n\nIn this code, the `from_config()` method reads the configuration file that you created when you set up your Azure Machine Learning workspace. The `Experiment` constructor creates an Experiment object that is associated with your workspace, and the `start_logging()` method creates a new Run object that is associated with the Experiment. Now you can use the `run` object to log metrics, upload files, and track other information related to your machine learning experiment.", "ground_truth":"Paris is the capital of France."} +{"query":"How do you log a model?","context":"Logging can be done using any OSS Sdk","response":"There are a few ways to log models in Azure Machine Learning. \n\nOne way is to use the `register_model()` method of the `Run` object. The `register_model()` method logs a model file in the Azure Machine Learning service workspace and makes it available for deployment. Here's an example:\n\n```python\nfrom azureml.core import Model\n\nmodel_path = '.\/outputs\/my_model.pkl'\nmodel = Model.register(workspace=ws, model_path=model_path, model_name='my_model')\n```\n\nThis code registers the model file located at `model_path` to the Azure Machine Learning service workspace with the name `my_model`. \n\nAnother way to log a model is to save it as an output of a `Run`. If your model generation code is part of a script or Jupyter notebook that runs as an Azure Machine Learning experiment, you can save the model file as an output of the `Run` object. Here's an example:\n\n```python\nfrom sklearn.linear_model import LogisticRegression\nfrom azureml.core.run import Run\n\n# Initialize a run object\nrun = Run.get_context()\n\n# Train your model\nX_train, y_train = ...\nlog_reg = LogisticRegression().fit(X_train, y_train)\n\n# Save the model to the Run object's outputs directory\nmodel_path = 'outputs\/model.pkl'\njoblib.dump(value=log_reg, filename=model_path)\n\n# Log the model as a run artifact\nrun.upload_file(name=model_path, path_or_stream=model_path)\n```\n\nIn this code, `Run.get_context()` retrieves the current run context object, which you can use to track metadata and metrics for the run. After training your model, you can use `joblib.dump()` to save the model to a file, and then log the file as an artifact of the run using `run.upload_file()`.","ground_truth":"Paris is the capital of France."} +{"query":"What is the capital of France?","context":"France is in Europe","response":"Paris is the capital of France.", "ground_truth":"Paris is the capital of France."} diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py b/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py new file mode 100644 index 000000000000..8010ef7effcc --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/evaluation/evaluation_test_base.py @@ -0,0 +1,46 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import sys +import logging +import functools +from dotenv import load_dotenv +from azure.ai.projects import AIProjectClient +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader + + +servicePreparerEvaluationsTests = functools.partial( + EnvironmentVariableLoader, + "azure_ai_projects_evaluations_tests", + azure_ai_projects_evaluations_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", + azure_ai_projects_evaluations_tests_default_aoai_connection_name="connection_name", + azure_ai_projects_evaluations_tests_deployment_name="deployment_name", + azure_ai_projects_evaluations_tests_api_version="2024-09-01-preview", + azure_ai_projects_evaluations_tests_dataset_id="test_dataset_id", +) + +# Set to True to enable SDK logging +LOGGING_ENABLED = False + +if LOGGING_ENABLED: + # Create a logger for the 'azure' SDK + # See https://docs.python.org/3/library/logging.html + logger = logging.getLogger("azure") + logger.setLevel(logging.DEBUG) # INFO or DEBUG + + # Configure a console output + handler = logging.StreamHandler(stream=sys.stdout) + logger.addHandler(handler) + + +class EvaluationsTestBase(AzureRecordedTestCase): + + def get_sync_client(self, **kwargs) -> AIProjectClient: + conn_str = kwargs.pop("azure_ai_projects_evaluations_tests_project_connection_string") + project_client = AIProjectClient.from_connection_string( + credential=self.get_credential(AIProjectClient, is_async=False), + conn_str=conn_str, + logging_enable=LOGGING_ENABLED, + ) + return project_client diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py new file mode 100644 index 000000000000..ab584442c5ca --- /dev/null +++ b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py @@ -0,0 +1,73 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy, get_credential, set_bodiless_matcher + +from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration +from tests.evaluation.evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests + +class TestEvaluation(EvaluationsTestBase): + + @servicePreparerEvaluationsTests() + @recorded_by_proxy + def test_evaluation_create(self, **kwargs): + set_bodiless_matcher() + default_aoai_connection_name = kwargs.pop("azure_ai_projects_evaluations_tests_default_aoai_connection_name") + project_client = self.get_sync_client(**kwargs) + default_aoai_connection = project_client.connections.get(connection_name=default_aoai_connection_name) + deployment_name = kwargs.get("azure_ai_projects_evaluations_tests_deployment_name") + api_version = kwargs.get("azure_ai_projects_evaluations_tests_api_version") + dataset_id = kwargs.get("azure_ai_projects_evaluations_tests_dataset_id") + + + evaluation = Evaluation( + display_name="Remote Evaluation E2E Test", + description="Evaluation of dataset using F1Score and Relevance evaluators", + data=Dataset(id=dataset_id), + evaluators={ + "f1_score": EvaluatorConfiguration( + id="azureml://registries/azureml-staging/models/F1Score-Evaluator/versions/3", + ), + "relevance": EvaluatorConfiguration( + id="azureml://registries/azureml-staging/models/Relevance-Evaluator/versions/3", + init_params={ + "model_config": default_aoai_connection.to_evaluator_model_config( + deployment_name=deployment_name, api_version=api_version + ) + }, + ), + }, + ) + + created_evaluation = project_client.evaluations.create(evaluation) + + assert created_evaluation.id is not None + assert created_evaluation.display_name == "Remote Evaluation E2E Test" + assert created_evaluation.description == "Evaluation of dataset using F1Score and Relevance evaluators" + assert created_evaluation.evaluators is not None + + assert created_evaluation.evaluators["f1_score"] is not None + assert created_evaluation.evaluators["f1_score"].id is not None + + assert created_evaluation.evaluators["relevance"] is not None + assert created_evaluation.evaluators["relevance"].id is not None + + + retrieved_evaluation = project_client.evaluations.get(created_evaluation.id) + + assert retrieved_evaluation.id == created_evaluation.id + assert retrieved_evaluation.id is not None + assert retrieved_evaluation.display_name == "Remote Evaluation E2E Test" + assert retrieved_evaluation.description == "Evaluation of dataset using F1Score and Relevance evaluators" + assert retrieved_evaluation.evaluators is not None + + assert retrieved_evaluation.evaluators["f1_score"] is not None + assert retrieved_evaluation.evaluators["f1_score"].id is not None + + assert retrieved_evaluation.evaluators["relevance"] is not None + assert retrieved_evaluation.evaluators["relevance"].id is not None + + # This failed with error : AttributeError: 'InputData' object has no attribute 'id' # TODO : Fix this + # assert created_evaluation.data.id == dataset_id From 4479f1229a3547d741df4a2a95264e9dbcda5504 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:47:22 -0800 Subject: [PATCH 090/138] Update how the inference URL is constructed (#38409) * First * Revert back this change --- .../azure/ai/projects/aio/operations/_patch.py | 12 +++++++++--- .../azure/ai/projects/operations/_patch.py | 13 +++++++++---- 2 files 
changed, 18 insertions(+), 7 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 6dcdd0515bc0..7c567d170e6a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -14,7 +14,6 @@ import time from pathlib import Path from azure.core.exceptions import ResourceNotFoundError -from io import TextIOWrapper from typing import ( IO, Any, @@ -107,7 +106,10 @@ async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletio if use_serverless_connection: endpoint = connection.endpoint_url else: - endpoint = f"https://{connection.name}.services.ai.azure.com/models" + # Be sure to use the Azure resource name here, not the connection name. Connection name is something that + # admins can pick when they manually create a new connection (or use bicep). Get the Azure resource name + # from the end of the connection id. + endpoint = f"https://{connection.id.split('/')[-1]}.services.ai.azure.com/models" if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( @@ -174,7 +176,11 @@ async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": if use_serverless_connection: endpoint = connection.endpoint_url else: - endpoint = f"https://{connection.name}.services.ai.azure.com/models" + # Be sure to use the Azure resource name here, not the connection name. Connection name is something that + # admins can pick when they manually create a new connection (or use bicep). Get the Azure resource name + # from the end of the connection id. + endpoint = f"https://{connection.id.split('/')[-1]}.services.ai.azure.com/models" + if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 1e3769ea5efc..72edf26ca42a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -10,7 +10,6 @@ """ import sys, io, logging, os, time from azure.core.exceptions import ResourceNotFoundError -from io import TextIOWrapper from typing import List, Union, IO, Any, Dict, Optional, overload, Sequence, TYPE_CHECKING, Iterator, TextIO, cast from pathlib import Path @@ -97,7 +96,10 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": if use_serverless_connection: endpoint = connection.endpoint_url else: - endpoint = f"https://{connection.name}.services.ai.azure.com/models" + # Be sure to use the Azure resource name here, not the connection name. Connection name is something that + # admins can pick when they manually create a new connection (or use bicep). Get the Azure resource name + # from the end of the connection id. + endpoint = f"https://{connection.id.split('/')[-1]}.services.ai.azure.com/models" if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( @@ -164,7 +166,10 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": if use_serverless_connection: endpoint = connection.endpoint_url else: - endpoint = f"https://{connection.name}.services.ai.azure.com/models" + # Be sure to use the Azure resource name here, not the connection name. 
Connection name is something that + # admins can pick when they manually create a new connection (or use bicep). Get the Azure resource name + # from the end of the connection id. + endpoint = f"https://{connection.id.split('/')[-1]}.services.ai.azure.com/models" if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( @@ -395,7 +400,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: tp = cast(TracerProvider, trace.get_tracer_provider()) tp.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination))) - elif isinstance(destination, TextIOWrapper): + elif isinstance(destination, io.TextIOWrapper): if destination is sys.stdout: # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter try: From 60ada67a34c03b1e2cb224564f1bdd75b3e91201 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 07:18:42 -0800 Subject: [PATCH 091/138] Fix some cspell errors, and link errors (#38420) --- .vscode/cspell.json | 7 +++++ sdk/ai/azure-ai-projects/README.md | 26 ++++++++++++------- .../azure/ai/projects/models/_models.py | 2 +- .../agents/sample_agents_code_interpreter.py | 4 +-- .../tests/evaluation/test_evaluation.py | 2 +- 5 files changed, 27 insertions(+), 14 deletions(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 5663f8faf23a..25c093b71a35 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -33,6 +33,7 @@ "sdk/batch/azure-batch/**", "sdk/ai/**/index/**", "sdk/ai/azure-ai-generative/tests/**", + "sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv", "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**", "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**", @@ -1321,6 +1322,12 @@ "smirnov" ] }, + { + "filename": "sdk/ai/azure-ai-projects/**", + "words": [ + "aiservices" + ] + }, { "filename": "sdk/ai/azure-ai-inference/**", "words": [ diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 21cc8ba3028c..360b319741db 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -36,6 +36,10 @@ Use the returned token credential to authenticate the client: >>> client = AIProjectClient(endpoint='', credential=DefaultAzureCredential()) ``` +## Key concepts + +TODO + ## Examples ### Agents @@ -58,7 +62,7 @@ The following steps outline the typical sequence for interacting with agents: #### Create Project Client -When you create an project client, you need to make the decision to use synchronized or asynchronized client. Use either: +When you create a project client, you need to make the decision to use synchronous or asynchronous client. 
Use either: ```python from azure.ai.projects import AIProjectClient @@ -81,7 +85,7 @@ project_client = AIProjectClient.from_connection_string( Because the client is under resource and context manager, you are required to use `with` or `async with` to consume the client object: ```python -# For synchronize +# For synchronous with project_client: agent = project_client.agents.create_agent( model="gpt-4-1106-preview", @@ -89,7 +93,7 @@ with project_client: instructions="You are helpful assistant" ) -# For asynchronize +# For asynchronous async with project_client: agent = project_client.agents.create_agent( model="gpt-4-1106-preview", @@ -99,7 +103,7 @@ async with project_client: ``` -In the sections below, we will only provide code snips in synchronized functions +In the sections below, we will only provide code snippets in synchronous functions. #### Create Agent @@ -185,9 +189,10 @@ agent = project_client.agents.create_agent( Again, you can define `toolset` instead of passing `tools` and `tool_resources`. #### Create Agent with File Upload for Code Interpreter + Here is an example to upload a file and use it for code interpreter by an agent: - + ```python file = project_client.agents.upload_file_and_poll( @@ -210,14 +215,15 @@ agent = project_client.agents.create_agent( #### Create Agent with Function Tool + You can enhance your agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: - `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. - `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. -For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](samples/agents/sample_agents_functions.py). +For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). -Here is an example to use [user functions](samples/agents/user_function.py) in `toolset`: +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_function.py) in `toolset`: ```python @@ -232,7 +238,7 @@ agent = project_client.agents.create_agent( -For asynchronized functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. 
Here is an example using [asynchronized user functions](samples/agents/async_samples/user_async_functions.py): +For asynchronous functions, you must import `AIProjectClient` from `azure.ai.projects.aio` and use `AsyncFunctionTool`. Here is an example using [asynchronous user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/async_samples/user_async_functions.py): ```python from azure.ai.projects.aio import AIProjectClient @@ -268,7 +274,7 @@ For each session or conversation, a thread is required. Here is an example: #### Create Thread with Tool Resource -In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector storre and upload a file, enable an agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. +In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. @@ -359,7 +365,7 @@ message = project_client.agents.create_message( To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. -`create_run` requests the agent to process the message without polling for the result. If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](samples/agents/sample_agents_functions.py). +`create_run` requests the agent to process the message without polling for the result. If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). Here is an example of `create_run` and poll until the run is completed: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index cca2208f8884..f1a362ba8448 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -5929,7 +5929,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class VectorStoreFileError(_model_base.Model): - """Details on the error that may have ocurred while processing a file for this vector store. + """Details on the error that may have occurred while processing a file for this vector store. :ivar code: One of ``server_error`` or ``rate_limit_exceeded``. Required. 
Known values are: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index d48705d905b0..ca0a4d2f2b4f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -39,7 +39,7 @@ with project_client: # upload a file and wait for it to be processed - # [START upload_file_and_creae_agent_with_code_interpreter] + # [START upload_file_and_create_agent_with_code_interpreter] file = project_client.agents.upload_file_and_poll( file_path="nifty_500_quarterly_results.csv", purpose=FilePurpose.AGENTS ) @@ -55,7 +55,7 @@ tools=code_interpreter.definitions, tool_resources=code_interpreter.resources, ) - # [END upload_file_and_creae_agent_with_code_interpreter] + # [END upload_file_and_create_agent_with_code_interpreter] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py index ab584442c5ca..0857b651db74 100644 --- a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py +++ b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py @@ -6,7 +6,7 @@ from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy, get_credential, set_bodiless_matcher from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration -from tests.evaluation.evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests +from evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests class TestEvaluation(EvaluationsTestBase): From 75a535452d53889ab9c17179bd6efad0b5165b9b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 07:30:52 -0800 Subject: [PATCH 092/138] cspell lint --config .vscode/cspell.json --no-summary "sdk/ai/azure-ai-projects/**/*" --- .../azure/ai/projects/aio/operations/_patch.py | 7 ++----- .../azure/ai/projects/operations/_patch.py | 10 +++++----- .../sample_agents_run_with_toolset_async.py | 2 +- .../samples/agents/sample_agents_basics.py | 14 +++++++------- .../samples/agents/sample_agents_bing_grounding.py | 6 ++---- .../agents/sample_agents_code_interpreter.py | 2 +- .../samples/agents/sample_agents_file_search.py | 6 ++---- .../samples/agents/sample_agents_functions.py | 7 +------ ..._agents_functions_with_azure_monitor_tracing.py | 7 +------ ...sample_agents_functions_with_console_tracing.py | 6 +----- .../agents/sample_agents_run_with_toolset.py | 6 +++--- .../agents/sample_agents_stream_eventhandler.py | 11 +++++++---- ...le_agents_stream_eventhandler_with_functions.py | 4 ++-- ...mple_agents_stream_eventhandler_with_toolset.py | 2 +- ...sample_agents_vector_store_batch_file_search.py | 2 +- ...agents_with_code_interpreter_file_attachment.py | 6 +++--- .../sample_agents_with_resources_in_thread.py | 8 +++----- .../samples/connections/sample_connections.py | 2 +- sdk/ai/azure-ai-projects/tests/conftest.py | 10 ++++++---- .../tests/connections/connection_test_base.py | 10 +++++++--- .../tests/connections/test_connections.py | 10 ++++++++-- .../tests/connections/test_connections_async.py | 10 ++++++++-- .../tests/evaluation/test_evaluation.py | 11 ++++++++--- 23 files changed, 81 insertions(+), 78 deletions(-) diff --git 
a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 7c567d170e6a..f4ef2192c7ad 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -27,7 +27,7 @@ overload, Sequence, TYPE_CHECKING, - TextIO + TextIO, ) from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated @@ -181,7 +181,6 @@ async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": # from the end of the connection id. endpoint = f"https://{connection.id.split('/')[-1]}.services.ai.azure.com/models" - if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using API key authentication" @@ -308,9 +307,7 @@ async def get_default( raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace_async - async def get( - self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: + async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 72edf26ca42a..37459d84365e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -134,7 +134,7 @@ def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": does not exist. Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package is not installed. - + :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.EmbeddingsClient :raises ~azure.core.exceptions.ResourceNotFoundError: @@ -297,9 +297,7 @@ def get_default( raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace - def get( - self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any - ) -> ConnectionProperties: + def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. @@ -343,7 +341,9 @@ def get( return ConnectionProperties(connection=connection) @distributed_trace - def list(self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any) -> Sequence[ConnectionProperties]: + def list( + self, *, connection_type: Optional[ConnectionType] = None, **kwargs: Any + ) -> Sequence[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. 
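The endpoint rewrite threaded through these patches is easy to miss among the formatting churn, so here is the idea in isolation: the `*.services.ai.azure.com` host must be built from the Azure resource name, which the patch comment says is the last segment of the connection id, not from the admin-chosen connection name. A minimal sketch under that assumption (the ARM-style id below is illustrative, not taken from the SDK; only its last segment matters):

```python
# Derive the model-inference endpoint from a connection id, as the patched
# get_chat_completions_client / get_embeddings_client helpers now do.
# The id layout here is an assumed example for illustration.
connection_id = (
    "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
    "Microsoft.MachineLearningServices/workspaces/<project>"
    "/connections/my-aiservices-account"
)

# Azure resource name is the final path segment, not the connection display name
resource_name = connection_id.split("/")[-1]
endpoint = f"https://{resource_name}.services.ai.azure.com/models"
print(endpoint)  # https://my-aiservices-account.services.ai.azure.com/models
```

Note that serverless connections are untouched by this change: on that path the helpers still use `connection.endpoint_url` verbatim, and the derivation above only applies on the non-serverless branch.
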
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index 91b38d7174c1..ad3950642ea9 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -52,7 +52,7 @@ async def main(): agent = await project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) - # [END create_agent_with_async_function_tool] + # [END create_agent_with_async_function_tool] print(f"Created agent, ID: {agent.id}") # Create thread for communication diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py index a9194ad747a7..5a7cbb265abc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -37,17 +37,17 @@ # [END create_project_client] with project_client: - - # [START create_agent] + + # [START create_agent] agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) - # [END create_agent] + # [END create_agent] print(f"Created agent, agent ID: {agent.id}") - # [START create_thread] + # [START create_thread] thread = project_client.agents.create_thread() - # [END create_thread] + # [END create_thread] print(f"Created thread, thread ID: {thread.id}") # [START create_message] @@ -63,7 +63,7 @@ # wait for a second time.sleep(1) run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id) - # [END create_run] + # [END create_run] print(f"Run status: {run.status}") project_client.agents.delete_agent(agent.id) @@ -72,5 +72,5 @@ # [START list_messages] messages = project_client.agents.list_messages(thread_id=thread.id) # [END list_messages] - + print(f"messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py index 12384b648563..d1e25464841a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py @@ -36,9 +36,7 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -bing_connection = project_client.connections.get( - connection_name=os.environ["BING_CONNECTION_NAME"] -) +bing_connection = project_client.connections.get(connection_name=os.environ["BING_CONNECTION_NAME"]) conn_id = bing_connection.id print(conn_id) @@ -53,7 +51,7 @@ name="my-assistant", instructions="You are a helpful assistant", tools=bing.definitions, - headers={"x-ms-enable-preview": "true"} + headers={"x-ms-enable-preview": "true"}, ) print(f"Created agent, ID: {agent.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index ca0a4d2f2b4f..c506099fb75c 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -55,7 +55,7 @@ tools=code_interpreter.definitions, tool_resources=code_interpreter.resources, ) - # [END upload_file_and_create_agent_with_code_interpreter] 
+ # [END upload_file_and_create_agent_with_code_interpreter] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index c16e6f72056b..dbbd7af862cc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -42,9 +42,7 @@ file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {file.id}") - vector_store = project_client.agents.create_vector_store_and_poll( - file_ids=[file.id], name="my_vectorstore" - ) + vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {vector_store.id}") # Create file search tool with resources followed by creating agent @@ -58,7 +56,7 @@ tool_resources=file_search.resources, ) # [END upload_file_create_vector_store_and_agent_with_file_search_tool] - + print(f"Created agent, ID: {agent.id}") # Create thread for communication diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py index 203307a71c80..6e675316a28d 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py @@ -23,12 +23,7 @@ import os, time from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import ( - FunctionTool, - RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput -) +from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from user_functions import user_functions diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index 0c7731a7f386..c4e7ecf71f5a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -28,12 +28,7 @@ from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import ( - FunctionTool, - RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput -) +from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from opentelemetry import trace from azure.monitor.opentelemetry import configure_azure_monitor diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index 9a8c0e34e40b..a82adb3823b6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -33,11 +33,7 @@ from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential -from azure.ai.projects.models import ( - FunctionTool, - 
RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput) +from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from opentelemetry import trace diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py index 9899e4b2ad1a..39f176f6c297 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_run_with_toolset.py @@ -40,18 +40,18 @@ # Create agent with toolset and process assistant run with project_client: # Initialize agent toolset with user functions and code interpreter - # [START create_agent_toolset] + # [START create_agent_toolset] functions = FunctionTool(user_functions) code_interpreter = CodeInterpreterTool() toolset = ToolSet() toolset.add(functions) toolset.add(code_interpreter) - + agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) - # [END create_agent_toolset] + # [END create_agent_toolset] print(f"Created agent, ID: {agent.id}") # Create thread for communication diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py index bc92f89450aa..1d255d1cd504 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler.py @@ -46,7 +46,8 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -# [START stream_event_handler] + +# [START stream_event_handler] class MyEventHandler(AgentEventHandler): def on_message_delta(self, delta: "MessageDeltaChunk") -> None: for content_part in delta.delta.content: @@ -71,7 +72,9 @@ def on_done(self) -> None: def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -# [END stream_event_handler] + + +# [END stream_event_handler] with project_client: @@ -87,12 +90,12 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") print(f"Created message, message ID {message.id}") - # [START create_stream] + # [START create_stream] with project_client.agents.create_stream( thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() ) as stream: stream.until_done() - # [END create_stream] + # [END create_stream] project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index 1f0ba29f638d..a061bf02d7f8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -30,7 +30,7 @@ MessageDeltaTextContent, RequiredFunctionToolCall, SubmitToolOutputsAction, - ToolOutput + ToolOutput, ) from azure.identity import DefaultAzureCredential from user_functions import user_functions @@ -103,7 +103,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: with project_client: - + # [START 
create_agent_with_function_tool] functions = FunctionTool(user_functions) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index e97952601539..0bbbcf3cab88 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -93,7 +93,7 @@ def on_unhandled_event(self, event_type: str, event_data: Any) -> None: agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset ) - # [END create_agent_with_function_tool] + # [END create_agent_with_function_tool] print(f"Created agent, ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py index d65ba2b6abd3..d3d83045876a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py @@ -63,7 +63,7 @@ tools=file_search_tool.definitions, tool_resources=file_search_tool.resources, ) - # [END create_agent_with_tools_and_tool_resources] + # [END create_agent_with_tools_and_tool_resources] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py index de4e8827d1a6..8c023f53d3c3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_code_interpreter_file_attachment.py @@ -46,7 +46,7 @@ print(f"Uploaded file, file ID: {file.id}") # [START create_agent_and_message_with_code_interpreter_file_attachment] - # notice that CodeInterpreter must be enabled in the agent creation, + # notice that CodeInterpreter must be enabled in the agent creation, # otherwise the agent will not be able to see the file attachment for code interpretation agent = project_client.agents.create_agent( model="gpt-4-1106-preview", @@ -61,13 +61,13 @@ # create an attachment attachment = MessageAttachment(file_id=file.id, tools=CodeInterpreterTool().definitions) - + # create a message message = project_client.agents.create_message( thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - attachments=[attachment] + attachments=[attachment], ) # [END create_agent_and_message_with_code_interpreter_file_attachment] print(f"Created message, message ID: {message.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py index d96a05128cc3..bd00e37e6fbf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py @@ -42,11 +42,9 @@ file = 
project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {file.id}") - vector_store = project_client.agents.create_vector_store_and_poll( - file_ids=[file.id], name="my_vectorstore" - ) + vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {vector_store.id}") - + # Create file search tool with resources followed by creating agent file_search = FileSearchTool(vector_store_ids=[vector_store.id]) @@ -57,7 +55,7 @@ tools=file_search.definitions, ) # [END upload_file_create_vector_store_and_agent_with_file_search_tool] - + print(f"Created agent, ID: {agent.id}") # Create thread with file resources. diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 64a5aecae3e1..38f1397155c3 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -29,7 +29,7 @@ from azure.ai.projects.models import ConnectionType, AuthenticationType from azure.identity import DefaultAzureCredential -#from azure.identity import DefaultAzureCredential, get_bearer_token_provider +# from azure.identity import DefaultAzureCredential, get_bearer_token_provider project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] connection_name = os.environ["CONNECTION_NAME"] diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index 03ba48a9ca49..303dc2e6bd5e 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -12,6 +12,7 @@ if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") + class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" RESOURCE_GROUP_NAME = "00000" @@ -21,6 +22,7 @@ class SanitizedValues: TENANT_ID = "00000000-0000-0000-0000-000000000000" USER_OBJECT_ID = "00000000-0000-0000-0000-000000000000" + @pytest.fixture(scope="session") def mock_project_scope(): return { @@ -29,18 +31,21 @@ def mock_project_scope(): "project_name": f"{SanitizedValues.WORKSPACE_NAME}", } + @pytest.fixture(scope="session") def mock_dataset_name(): return { "dataset_name": f"{SanitizedValues.DATASET_NAME}", } + @pytest.fixture(scope="session") def mock_connection_name(): return { "connection_name": f"{SanitizedValues.CONNECTION_NAME}", } + # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): @@ -76,11 +81,8 @@ def azure_workspace_triad_sanitizer(): regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1" ) - add_general_regex_sanitizer( - regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1" - ) + add_general_regex_sanitizer(regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1") azure_workspace_triad_sanitizer() remove_batch_sanitizers(["AZSDK3493"]) - diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index e9b9202d0053..dfe9f7024b87 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ 
b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -39,10 +39,14 @@ class ConnectionsTestBase(AzureRecordedTestCase): NON_EXISTING_CONNECTION_NAME = "non-existing-connection-name" - EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME = f"Connection {NON_EXISTING_CONNECTION_NAME} can't be found in this workspace" + EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME = ( + f"Connection {NON_EXISTING_CONNECTION_NAME} can't be found in this workspace" + ) NON_EXISTING_CONNECTION_TYPE = "non-existing-connection-type" - EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE = f"No connection of type {NON_EXISTING_CONNECTION_TYPE} found" + EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE = ( + f"No connection of type {NON_EXISTING_CONNECTION_TYPE} found" + ) def get_sync_client(self, **kwargs) -> AIProjectClient: conn_str = kwargs.pop("azure_ai_projects_connections_tests_project_connection_string") @@ -70,7 +74,7 @@ def validate_connection( *, expected_connection_type: ConnectionType = None, expected_connection_name: str = None, - expected_authentication_type: AuthenticationType = None + expected_authentication_type: AuthenticationType = None, ): assert connection.id is not None diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index ebfd4c9db70e..10fd453fd4e0 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -23,7 +23,10 @@ def test_connections_get(self, **kwargs): for with_credentials in [True, False]: try: - connection_properties = project_client.connections.get(connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials) + connection_properties = project_client.connections.get( + connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, + with_credentials=with_credentials, + ) assert False except ResourceNotFoundError as e: print(e) @@ -78,7 +81,10 @@ def test_connections_get_default(self, **kwargs): for with_credentials in [True, False]: try: - connection_properties = project_client.connections.get_default(connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials) + connection_properties = project_client.connections.get_default( + connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, + with_credentials=with_credentials, + ) assert False except ResourceNotFoundError as e: print(e) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 8688fc2df694..a2299ecc334d 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -22,7 +22,10 @@ async def test_connections_get_async(self, **kwargs): for with_credentials in [True, False]: try: - connection_properties = await project_client.connections.get(connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials) + connection_properties = await project_client.connections.get( + connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, + with_credentials=with_credentials, + ) assert False except ResourceNotFoundError as e: print(e) @@ -81,7 +84,10 @@ async def test_connections_get_default_async(self, **kwargs): for with_credentials in [True, False]: try: - 
connection_properties = await project_client.connections.get_default(connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials) + connection_properties = await project_client.connections.get_default( + connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, + with_credentials=with_credentials, + ) assert False except ResourceNotFoundError as e: print(e) diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py index 0857b651db74..ef28272b0e8a 100644 --- a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py +++ b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py @@ -3,11 +3,18 @@ # Licensed under the MIT License. # ------------------------------------ -from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy, get_credential, set_bodiless_matcher +from devtools_testutils import ( + AzureRecordedTestCase, + EnvironmentVariableLoader, + recorded_by_proxy, + get_credential, + set_bodiless_matcher, +) from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration from evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests + class TestEvaluation(EvaluationsTestBase): @servicePreparerEvaluationsTests() @@ -21,7 +28,6 @@ def test_evaluation_create(self, **kwargs): api_version = kwargs.get("azure_ai_projects_evaluations_tests_api_version") dataset_id = kwargs.get("azure_ai_projects_evaluations_tests_dataset_id") - evaluation = Evaluation( display_name="Remote Evaluation E2E Test", description="Evaluation of dataset using F1Score and Relevance evaluators", @@ -54,7 +60,6 @@ def test_evaluation_create(self, **kwargs): assert created_evaluation.evaluators["relevance"] is not None assert created_evaluation.evaluators["relevance"].id is not None - retrieved_evaluation = project_client.evaluations.get(created_evaluation.id) assert retrieved_evaluation.id == created_evaluation.id From 267c0f7fc45414fc605547709a06051f06c77000 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Fri, 8 Nov 2024 08:52:28 -0800 Subject: [PATCH 093/138] Resolved comments from Krista (#38405) * Address comments * Run black --- .../ai/projects/aio/operations/_patch.py | 2 -- .../azure/ai/projects/models/_patch.py | 23 +++++++++++-------- .../azure/ai/projects/operations/_patch.py | 3 +-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index f4ef2192c7ad..d9603faf2c35 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -1214,7 +1214,6 @@ async def create_run( else: raise ValueError("Invalid combination of arguments provided.") - # If streaming is enabled, return the custom stream object return await response @distributed_trace_async @@ -1735,7 +1734,6 @@ async def submit_tool_outputs_to_run( else: raise ValueError("Invalid combination of arguments provided.") - # If streaming is enabled, return the custom stream object return await response @overload diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 62eba59f6a09..2fc29572a785 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ 
b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -946,7 +946,10 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: pass -class AsyncAgentRunStream(AsyncIterator[Tuple[str, Any]]): +StreamEventData = Union[MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep, None] + + +class AsyncAgentRunStream(AsyncIterator[Tuple[str, StreamEventData]]): def __init__( self, response_iterator: AsyncIterator[bytes], @@ -972,7 +975,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): def __aiter__(self): return self - async def __anext__(self) -> Tuple[str, Any]: + async def __anext__(self) -> Tuple[str, StreamEventData]: while True: try: chunk = await self.response_iterator.__anext__() @@ -988,7 +991,7 @@ async def __anext__(self) -> Tuple[str, Any]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return await self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" @@ -1003,7 +1006,9 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: raise ValueError("Event type not specified in the event data.") try: - parsed_data: Union[str, Dict[str, Any]] = cast(Dict[str, Any], json.loads(event_data)) + parsed_data: Union[str, Dict[str, StreamEventData]] = cast( + Dict[str, StreamEventData], json.loads(event_data) + ) except json.JSONDecodeError: parsed_data = event_data @@ -1049,7 +1054,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: return event_type, event_data_obj - async def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_type, event_data_obj = self._parse_event_data(event_data_str) if ( @@ -1093,7 +1098,7 @@ async def until_done(self) -> None: pass -class AgentRunStream(Iterator[Tuple[str, Any]]): +class AgentRunStream(Iterator[Tuple[str, StreamEventData]]): def __init__( self, response_iterator: Iterator[bytes], @@ -1117,7 +1122,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def __iter__(self): return self - def __next__(self) -> Tuple[str, Any]: + def __next__(self) -> Tuple[str, StreamEventData]: if self.done: raise StopIteration while True: @@ -1135,7 +1140,7 @@ def __next__(self) -> Tuple[str, Any]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" @@ -1196,7 +1201,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, Any]: return event_type, event_data_obj - def _process_event(self, event_data_str: str) -> Tuple[str, Any]: + def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_type, event_data_obj = self._parse_event_data(event_data_str) if ( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 37459d84365e..b50ec0594c39 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1308,13 +1308,13 @@ def create_run( else: raise ValueError("Invalid combination of 
arguments provided.") - # If streaming is enabled, return the custom stream object return response @distributed_trace def create_and_process_run( self, thread_id: str, + *, assistant_id: str, model: Optional[str] = None, instructions: Optional[str] = None, @@ -1847,7 +1847,6 @@ def submit_tool_outputs_to_run( else: raise ValueError("Invalid combination of arguments provided.") - # If streaming is enabled, return the custom stream object return response @overload From db34c99dd80c6417a75ccbca2590d5ffe53b6e8e Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:49:06 -0800 Subject: [PATCH 094/138] Add support of Azure asset IDs for the vector store. (#38066) * Generate code * Generate new code * Expose file API and add the async test. * Remove unnecessary change * Add async samples * Do not create ML Client in tests * Better file retrieval * Regenerate code * Re generate the code * Re generate code * Fix samle name and make iterface more clear * Add samples for attachment enterprise search * Add unit tests for message attachment * Finzlize sync tests. * Add unit tests and use constants as a URI types * Regenerate code * Fix * Generate new code * Draft commit * Add another unit test * Fix and add test data * Regenerate files * Add more unit tests * Re generate code * Regenerate files * Rename classes and regenerate the code * Fixes * Fix parameter name * Fix file names * Script to check sample names inside file comments. --- .../azure/ai/projects/_patch.py | 10 +- .../azure/ai/projects/aio/_patch.py | 10 +- .../ai/projects/aio/operations/_operations.py | 49 +- .../ai/projects/aio/operations/_patch.py | 166 +- .../azure/ai/projects/models/__init__.py | 10 + .../azure/ai/projects/models/_enums.py | 10 + .../azure/ai/projects/models/_models.py | 175 +- .../azure/ai/projects/models/_patch.py | 2 - .../ai/projects/operations/_operations.py | 49 +- .../azure/ai/projects/operations/_patch.py | 173 +- .../agents/_ai_agents_instrumentor.py | 5 +- ...gents_code_interpreter_attachment_async.py | 86 + ...eter_attachment_enterprise_search_async.py | 88 + ...tore_batch_enterprise_file_search_async.py | 108 ++ ...ctor_store_enterprise_file_search_async.py | 83 + ...e_agents_vector_store_file_search_async.py | 85 + ...nterpreter_attachment_enterprise_search.py | 81 + .../sample_agents_enterprise_file_search.py | 78 + ...ts_functions_with_azure_monitor_tracing.py | 2 +- ...e_agents_functions_with_console_tracing.py | 2 +- ...ctor_store_batch_enterprise_file_search.py | 103 ++ ...e_agents_vector_store_batch_file_search.py | 10 +- .../sample_agents_vector_store_file_search.py | 79 + .../async_samples/sample_evaluations_async.py | 2 +- .../samples/evaluations/sample_evaluations.py | 2 +- sdk/ai/azure-ai-projects/tests/README.md | 2 +- .../tests/agents/test_agents_client.py | 514 +++++- .../tests/agents/test_agents_client_async.py | 1628 +++++++++++++++++ .../tests/agents/test_vector_store.py | 41 + .../tests/check_sample_name.sh | 20 + .../tests/evaluation/test_evaluation.py | 1 + .../tests/test_data/product_info_1.md | 51 + sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 33 files changed, 3635 insertions(+), 92 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py create mode 100644 
sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py create mode 100644 sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py create mode 100644 sdk/ai/azure-ai-projects/tests/check_sample_name.sh create mode 100644 sdk/ai/azure-ai-projects/tests/test_data/product_info_1.md diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 312367a8dec6..95dc6b8ea967 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -9,7 +9,7 @@ import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict +from typing import List, Any, Union, Dict, Tuple from typing_extensions import Self from azure.core.credentials import TokenCredential from azure.core import PipelineClient @@ -230,14 +230,14 @@ def from_connection_string(cls, conn_str: str, credential: "TokenCredential", ** project_name = parts[3] return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: + def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str]: """Upload a file to the Azure AI Studio project. This method required *azure-ai-ml* to be installed. :param file_path: The path to the file to upload. :type file_path: Union[str, Path, PathLike] - :return: The asset id of uploaded file. - :rtype: str + :return: The tuple, containing asset id and asset URI of uploaded file. 
+ :rtype: Tuple[str] """ try: from azure.ai.ml import MLClient # type: ignore @@ -265,7 +265,7 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: data_asset = ml_client.data.create_or_update(data) - return data_asset.id + return data_asset.id, data_asset.path @property def scope(self) -> Dict[str, str]: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 9a21737b6bb8..97373d32cf3a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -9,7 +9,7 @@ import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict, TYPE_CHECKING +from typing import List, Any, Union, Dict, Tuple, TYPE_CHECKING from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from typing_extensions import Self @@ -233,14 +233,14 @@ def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential project_name = parts[3] return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: + def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str]: """Upload a file to the Azure AI Studio project. This method required *azure-ai-ml* to be installed. :param file_path: The path to the file to upload. :type file_path: Union[str, Path, PathLike] - :return: The asset id of uploaded file. - :rtype: str + :return: The tuple, containing asset id and asset URI of uploaded file. + :rtype: Tuple[str] """ try: from azure.ai.ml import MLClient # type: ignore @@ -268,7 +268,7 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> str: data_asset = ml_client.data.create_or_update(data) - return data_asset.id + return data_asset.id, data_asset.path @property def scope(self) -> Dict[str, str]: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index 419f86e97145..ec7576832710 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -3695,6 +3695,7 @@ async def create_vector_store( content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -3710,6 +3711,9 @@ async def create_vector_store( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). 
If not set, will @@ -3748,6 +3752,7 @@ async def create_vector_store( *, file_ids: Optional[List[str]] = None, name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -3762,6 +3767,9 @@ async def create_vector_store( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will @@ -3793,6 +3801,7 @@ async def create_vector_store( if body is _Unset: body = { "chunking_strategy": chunking_strategy, + "configuration": store_configuration, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, @@ -4269,8 +4278,9 @@ async def create_vector_store_file( self, vector_store_id: str, *, - file_id: str, content_type: str = "application/json", + file_id: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -4278,11 +4288,13 @@ async def create_vector_store_file( :param vector_store_id: Identifier of the vector store. Required. :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_sources: Azure asset ID. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4315,7 +4327,8 @@ async def create_vector_store_file( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - file_id: str = _Unset, + file_id: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -4325,8 +4338,10 @@ async def create_vector_store_file( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. + :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str + :keyword data_sources: Azure asset ID. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4349,9 +4364,7 @@ async def create_vector_store_file( cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_id": file_id} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -4568,8 +4581,9 @@ async def create_vector_store_file_batch( self, vector_store_id: str, *, - file_ids: List[str], content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFileBatch: @@ -4577,11 +4591,13 @@ async def create_vector_store_file_batch( :param vector_store_id: Identifier of the vector store. Required. :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4614,7 +4630,8 @@ async def create_vector_store_file_batch( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - file_ids: List[str] = _Unset, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFileBatch: @@ -4624,8 +4641,10 @@ async def create_vector_store_file_batch( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. + :keyword file_ids: List of file identifiers. Default value is None. :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -4648,9 +4667,7 @@ async def create_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index d9603faf2c35..2771db4188e1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -2118,6 +2118,7 @@ async def create_vector_store_and_poll( content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -2134,6 +2135,8 @@ async def create_vector_store_and_poll( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will @@ -2179,6 +2182,7 @@ async def create_vector_store_and_poll( content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -2194,6 +2198,8 @@ async def create_vector_store_and_poll( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). 
If not set, will
@@ -2214,11 +2220,13 @@ async def create_vector_store_and_poll(
 
         if body is not None:
             vector_store = await self.create_vector_store(body=body, content_type=content_type, **kwargs)
-        elif file_ids is not None or (name is not None and expires_after is not None):
+        elif file_ids is not None or data_sources is not None or (name is not None and expires_after is not None):
+            store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) if data_sources else None
             vector_store = await self.create_vector_store(
                 content_type=content_type,
                 file_ids=file_ids,
                 name=name,
+                store_configuration=store_configuration,
                 expires_after=expires_after,
                 chunking_strategy=chunking_strategy,
                 metadata=metadata,
@@ -2227,7 +2235,7 @@
         else:
             raise ValueError(
                 "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
-                "'file_ids', or 'name' and 'expires_after'."
+                "'file_ids', 'data_sources', or 'name' and 'expires_after'."
             )
 
         while vector_store.status == "in_progress":
@@ -2269,6 +2277,7 @@ async def create_vector_store_file_batch_and_poll(
         vector_store_id: str,
         *,
         file_ids: List[str],
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
         content_type: str = "application/json",
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
         sleep_interval: float = 1,
@@ -2280,6 +2289,8 @@
         :type vector_store_id: str
         :keyword file_ids: List of file identifiers. Required.
         :paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
         :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
             Default value is "application/json".
         :paramtype content_type: str
@@ -2327,7 +2338,8 @@
         vector_store_id: str,
         body: Union[JSON, IO[bytes], None] = None,
         *,
-        file_ids: List[str] = _Unset,
+        file_ids: Optional[List[str]] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
         sleep_interval: float = 1,
         **kwargs: Any,
@@ -2340,6 +2352,8 @@
         :type body: JSON or IO[bytes]
-        :keyword file_ids: List of file identifiers. Required.
+        :keyword file_ids: List of file identifiers. Default value is None.
         :paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
         :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
             use the auto strategy. Default value is None.
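Taken together, the branch above is the new entry point for enterprise file search: when `data_sources` is supplied, the helper wraps it in a `VectorStoreConfiguration` and forwards it as `store_configuration`. A minimal sketch of that call path, assuming a hypothetical connection string and asset URI (both placeholders, not real values):

```python
import os

from azure.ai.projects.aio import AIProjectClient
from azure.ai.projects.models import VectorStoreDataSource, VectorStoreDataSourceAssetType
from azure.identity.aio import DefaultAzureCredential


async def build_store_from_asset() -> None:
    async with AIProjectClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
    ) as project_client:
        # Hypothetical Azure asset URI; any uri_asset data source works here.
        ds = VectorStoreDataSource(
            asset_identifier="azureml://datastores/default/paths/product_info_1.md",
            asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
        )
        # data_sources alone satisfies the elif branch above, so neither file_ids
        # nor the name/expires_after pair is required.
        vector_store = await project_client.agents.create_vector_store_and_poll(
            data_sources=[ds], name="enterprise_store"
        )
        print(f"Created vector store, vector store ID: {vector_store.id}")
```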
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -2350,7 +2364,11 @@ async def create_vector_store_file_batch_and_poll( if body is None: vector_store_file_batch = await super().create_vector_store_file_batch( - vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs + vector_store_id=vector_store_id, + file_ids=file_ids, + data_sources=data_sources, + chunking_strategy=chunking_strategy, + **kwargs, ) else: content_type = kwargs.get("content_type", "application/json") @@ -2366,6 +2384,146 @@ async def create_vector_store_file_batch_and_poll( return vector_store_file_batch + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: JSON, + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :param body: Required. + :type body: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + *, + content_type: str = "application/json", + file_id: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, + chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. + :type vector_store_id: str + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_sources: Azure asset ID. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will + use the auto strategy. Default value is None. + :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest + :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value + is 1. + :paramtype sleep_interval: float + :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping + :rtype: ~azure.ai.projects.models.VectorStoreFile + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_vector_store_file_and_poll( + self, + vector_store_id: str, + body: IO[bytes], + *, + content_type: str = "application/json", + sleep_interval: float = 1, + **kwargs: Any, + ) -> _models.VectorStoreFile: + """Create a vector store file by attaching a file to a vector store. + + :param vector_store_id: Identifier of the vector store. Required. 
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+            Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace_async
+    async def create_vector_store_file_and_poll(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes], None] = None,
+        *,
+        file_id: Optional[str] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+        :type body: JSON or IO[bytes] or None
+        :keyword file_id: Identifier of the file. Default value is None.
+        :paramtype file_id: str
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+            use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile.
The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        if body is None:
+            vector_store_file = await super().create_vector_store_file(
+                vector_store_id=vector_store_id,
+                file_id=file_id,
+                data_sources=data_sources,
+                chunking_strategy=chunking_strategy,
+                **kwargs,
+            )
+        else:
+            content_type = kwargs.get("content_type", "application/json")
+            vector_store_file = await super().create_vector_store_file(body=body, content_type=content_type, **kwargs)
+
+        while vector_store_file.status == "in_progress":
+            await asyncio.sleep(sleep_interval)
+            vector_store_file = await super().get_vector_store_file(
+                vector_store_id=vector_store_id, file_id=vector_store_file.id
+            )
+
+        return vector_store_file
+
     @distributed_trace_async
     async def get_file_content(self, file_id: str, **kwargs: Any) -> AsyncIterator[bytes]:
         """
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
index 353ea686df0a..ad3dbcbfe823 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/__init__.py
@@ -33,6 +33,7 @@
     EvaluatorConfiguration,
     FileDeletionStatus,
     FileListResponse,
+    FileSearchRankingOptions,
     FileSearchToolDefinition,
     FileSearchToolDefinitionDetails,
     FileSearchToolResource,
@@ -141,6 +142,9 @@
     VectorStoreAutoChunkingStrategyResponse,
     VectorStoreChunkingStrategyRequest,
     VectorStoreChunkingStrategyResponse,
+    VectorStoreConfiguration,
+    VectorStoreConfigurations,
+    VectorStoreDataSource,
     VectorStoreDeletionStatus,
     VectorStoreExpirationPolicy,
     VectorStoreFile,
@@ -182,6 +186,7 @@
     TruncationStrategy,
     VectorStoreChunkingStrategyRequestType,
     VectorStoreChunkingStrategyResponseType,
+    VectorStoreDataSourceAssetType,
     VectorStoreExpirationPolicyAnchor,
     VectorStoreFileBatchStatus,
     VectorStoreFileErrorCode,
@@ -214,6 +219,7 @@
     "EvaluatorConfiguration",
     "FileDeletionStatus",
     "FileListResponse",
+    "FileSearchRankingOptions",
     "FileSearchToolDefinition",
     "FileSearchToolDefinitionDetails",
     "FileSearchToolResource",
@@ -322,6 +328,9 @@
     "VectorStoreAutoChunkingStrategyResponse",
     "VectorStoreChunkingStrategyRequest",
     "VectorStoreChunkingStrategyResponse",
+    "VectorStoreConfiguration",
+    "VectorStoreConfigurations",
+    "VectorStoreDataSource",
     "VectorStoreDeletionStatus",
     "VectorStoreExpirationPolicy",
     "VectorStoreFile",
@@ -360,6 +369,7 @@
     "TruncationStrategy",
     "VectorStoreChunkingStrategyRequestType",
     "VectorStoreChunkingStrategyResponseType",
+    "VectorStoreDataSourceAssetType",
     "VectorStoreExpirationPolicyAnchor",
     "VectorStoreFileBatchStatus",
     "VectorStoreFileErrorCode",
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
index bbb0b7729939..efde56d8624d 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py
@@ -433,6 +433,16 @@ class VectorStoreChunkingStrategyResponseType(str, Enum, metaclass=CaseInsensiti
     STATIC = "static"
 
 
+class VectorStoreDataSourceAssetType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
+    """Type of vector storage asset. For a uri_asset the value should contain the asset URI;
+    for an id_asset it should contain the data ID.
+ """ + + URI_ASSET = "uri_asset" + ID_ASSET = "id_asset" + + class VectorStoreExpirationPolicyAnchor(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Describes the relationship between the days and the expiration of this vector store.""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index f1a362ba8448..9a4480ed227d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -631,18 +631,24 @@ class CodeInterpreterToolResource(_model_base.Model): be a maximum of 20 files associated with the tool. :vartype file_ids: list[str] + :ivar data_sources: The data sources to be used. This option is mutually exclusive with + fileIds. + :vartype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] """ file_ids: Optional[List[str]] = rest_field() """A list of file IDs made available to the ``code_interpreter`` tool. There can be a maximum of 20 files associated with the tool.""" + data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field() + """The data sources to be used. This option is mutually exclusive with fileIds.""" @overload def __init__( self, *, file_ids: Optional[List[str]] = None, + data_sources: Optional[List["_models.VectorStoreDataSource"]] = None, ) -> None: ... @overload @@ -1046,6 +1052,40 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: self.object: Literal["list"] = "list" +class FileSearchRankingOptions(_model_base.Model): + """Ranking options for file search. + + + :ivar ranker: File search ranker. Required. + :vartype ranker: str + :ivar score_threshold: Ranker search threshold. Required. + :vartype score_threshold: float + """ + + ranker: str = rest_field() + """File search ranker. Required.""" + score_threshold: float = rest_field() + """Ranker search threshold. Required.""" + + @overload + def __init__( + self, + *, + ranker: str, + score_threshold: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class FileSearchToolDefinition(ToolDefinition, discriminator="file_search"): """The input definition information for a file search tool as used to configure an agent. @@ -1090,6 +1130,8 @@ class FileSearchToolDefinitionDetails(_model_base.Model): Note that the file search tool may output fewer than ``max_num_results`` results. See the file search tool documentation for more information. :vartype max_num_results: int + :ivar ranking_options: + :vartype ranking_options: ~azure.ai.projects.models.FileSearchRankingOptions """ max_num_results: Optional[int] = rest_field() @@ -1098,12 +1140,14 @@ class FileSearchToolDefinitionDetails(_model_base.Model): Note that the file search tool may output fewer than ``max_num_results`` results. See the file search tool documentation for more information.""" + ranking_options: Optional["_models.FileSearchRankingOptions"] = rest_field() @overload def __init__( self, *, max_num_results: Optional[int] = None, + ranking_options: Optional["_models.FileSearchRankingOptions"] = None, ) -> None: ... @overload @@ -1124,17 +1168,27 @@ class FileSearchToolResource(_model_base.Model): maximum of 1 vector store attached to the agent. 
:vartype vector_store_ids: list[str] + :ivar vector_stores: The list of vector store configuration objects from Azure. This list is + limited to one + element. The only element of this list contains + the list of azure asset IDs used by the search tool. + :vartype vector_stores: list[~azure.ai.projects.models.VectorStoreConfigurations] """ vector_store_ids: Optional[List[str]] = rest_field() """The ID of the vector store attached to this agent. There can be a maximum of 1 vector store attached to the agent.""" + vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = rest_field() + """The list of vector store configuration objects from Azure. This list is limited to one + element. The only element of this list contains + the list of azure asset IDs used by the search tool.""" @overload def __init__( self, *, vector_store_ids: Optional[List[str]] = None, + vector_stores: Optional[List["_models.VectorStoreConfigurations"]] = None, ) -> None: ... @overload @@ -1388,12 +1442,13 @@ class InternalConnectionPropertiesAADAuth(InternalConnectionProperties, discrimi :ivar target: The connection URL to be used for this service. Required. :vartype target: str :ivar auth_type: Authentication type of the connection target. Required. Entra ID - authentication - :vartype auth_type: str or ~azure.ai.projects.models.AAD + authentication (formerly known as AAD) + :vartype auth_type: str or ~azure.ai.projects.models.ENTRA_ID """ auth_type: Literal[AuthenticationType.ENTRA_ID] = rest_discriminator(name="authType") # type: ignore - """Authentication type of the connection target. Required. Entra ID authentication""" + """Authentication type of the connection target. Required. Entra ID authentication (formerly known + as AAD)""" class InternalConnectionPropertiesApiKeyAuth(InternalConnectionProperties, discriminator="ApiKey"): @@ -1456,15 +1511,19 @@ class MessageAttachment(_model_base.Model): """This describes to which tools a file has been attached. - :ivar file_id: The ID of the file to attach to the message. Required. + :ivar file_id: The ID of the file to attach to the message. :vartype file_id: str + :ivar data_sources: Azure asset ID. + :vartype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :ivar tools: The tools to add to this file. Required. :vartype tools: list[~azure.ai.projects.models.CodeInterpreterToolDefinition or ~azure.ai.projects.models.FileSearchToolDefinition] """ - file_id: str = rest_field() - """The ID of the file to attach to the message. Required.""" + file_id: Optional[str] = rest_field() + """The ID of the file to attach to the message.""" + data_sources: Optional[List["_models.VectorStoreDataSource"]] = rest_field() + """Azure asset ID.""" tools: List["_types.MessageAttachmentToolDefinition"] = rest_field() """The tools to add to this file. Required.""" @@ -1472,8 +1531,9 @@ class MessageAttachment(_model_base.Model): def __init__( self, *, - file_id: str, tools: List["_types.MessageAttachmentToolDefinition"], + file_id: Optional[str] = None, + data_sources: Optional[List["_models.VectorStoreDataSource"]] = None, ) -> None: ... @overload @@ -5613,6 +5673,107 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, type=VectorStoreChunkingStrategyResponseType.OTHER, **kwargs) +class VectorStoreConfiguration(_model_base.Model): + """Vector storage configuration is the list of data sources, used when multiple + files can be used for the enterprise file search. + + + :ivar data_sources: Data sources. Required. 
+ :vartype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] + """ + + data_sources: List["_models.VectorStoreDataSource"] = rest_field() + """Data sources. Required.""" + + @overload + def __init__( + self, + *, + data_sources: List["_models.VectorStoreDataSource"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreConfigurations(_model_base.Model): + """The structure, containing the list of vector storage configurations i.e. the list of azure + asset IDs. + + + :ivar store_name: Name. Required. + :vartype store_name: str + :ivar store_configuration: Configurations. Required. + :vartype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration + """ + + store_name: str = rest_field(name="name") + """Name. Required.""" + store_configuration: "_models.VectorStoreConfiguration" = rest_field(name="configuration") + """Configurations. Required.""" + + @overload + def __init__( + self, + *, + store_name: str, + store_configuration: "_models.VectorStoreConfiguration", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorStoreDataSource(_model_base.Model): + """The structure, containing Azure asset URI path and the asset type of the file used as a data + source + for the enterprise file search. + + + :ivar asset_identifier: Asset URI. Required. + :vartype asset_identifier: str + :ivar asset_type: The asset type *. Required. Known values are: "uri_asset" and "id_asset". + :vartype asset_type: str or ~azure.ai.projects.models.VectorStoreDataSourceAssetType + """ + + asset_identifier: str = rest_field(name="uri") + """Asset URI. Required.""" + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field(name="type") + """The asset type *. Required. Known values are: \"uri_asset\" and \"id_asset\".""" + + @overload + def __init__( + self, + *, + asset_identifier: str, + asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + class VectorStoreDeletionStatus(_model_base.Model): """Response object for deleting a vector store. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 2fc29572a785..c407605ec666 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
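The three models added above compose into the file search tool resources. A minimal construction sketch; the asset URI is a hypothetical placeholder, and the wire names follow the `rest_field` aliases declared in the classes (`name`/`configuration` for `VectorStoreConfigurations`, `uri`/`type` for `VectorStoreDataSource`):

```python
from azure.ai.projects.models import (
    FileSearchToolResource,
    VectorStoreConfiguration,
    VectorStoreConfigurations,
    VectorStoreDataSource,
    VectorStoreDataSourceAssetType,
)

# One data source pointing at a hypothetical Azure asset URI.
ds = VectorStoreDataSource(
    asset_identifier="azureml://datastores/default/paths/product_info_1.md",
    asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
)

# Serializes as {"name": ..., "configuration": {"data_sources": [{"uri": ..., "type": "uri_asset"}]}}.
# Per the docstrings above, FileSearchToolResource.vector_stores is limited to one element.
tool_resource = FileSearchToolResource(
    vector_stores=[
        VectorStoreConfigurations(
            store_name="sample_store",
            store_configuration=VectorStoreConfiguration(data_sources=[ds]),
        )
    ]
)
```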
@@ -26,7 +25,6 @@ SubmitToolOutputsAction, ThreadRun, RunStep, - ThreadMessage, RunStepDeltaChunk, FunctionToolDefinition, FunctionDefinition, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index d0cb49c291b1..4eaf33658311 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -5089,6 +5089,7 @@ def create_vector_store( content_type: str = "application/json", file_ids: Optional[List[str]] = None, name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -5104,6 +5105,9 @@ def create_vector_store( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will @@ -5142,6 +5146,7 @@ def create_vector_store( *, file_ids: Optional[List[str]] = None, name: Optional[str] = None, + store_configuration: Optional[_models.VectorStoreConfiguration] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, metadata: Optional[Dict[str, str]] = None, @@ -5156,6 +5161,9 @@ def create_vector_store( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword store_configuration: The vector store configuration, used when vector store is created + from Azure asset URIs. Default value is None. + :paramtype store_configuration: ~azure.ai.projects.models.VectorStoreConfiguration :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will @@ -5187,6 +5195,7 @@ def create_vector_store( if body is _Unset: body = { "chunking_strategy": chunking_strategy, + "configuration": store_configuration, "expires_after": expires_after, "file_ids": file_ids, "metadata": metadata, @@ -5663,8 +5672,9 @@ def create_vector_store_file( self, vector_store_id: str, *, - file_id: str, content_type: str = "application/json", + file_id: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -5672,11 +5682,13 @@ def create_vector_store_file( :param vector_store_id: Identifier of the vector store. Required. :type vector_store_id: str - :keyword file_id: Identifier of the file. Required. - :paramtype file_id: str :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
Default value is "application/json". :paramtype content_type: str + :keyword file_id: Identifier of the file. Default value is None. + :paramtype file_id: str + :keyword data_sources: Azure asset ID. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -5709,7 +5721,8 @@ def create_vector_store_file( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - file_id: str = _Unset, + file_id: Optional[str] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFile: @@ -5719,8 +5732,10 @@ def create_vector_store_file( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword file_id: Identifier of the file. Required. + :keyword file_id: Identifier of the file. Default value is None. :paramtype file_id: str + :keyword data_sources: Azure asset ID. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -5743,9 +5758,7 @@ def create_vector_store_file( cls: ClsType[_models.VectorStoreFile] = kwargs.pop("cls", None) if body is _Unset: - if file_id is _Unset: - raise TypeError("missing required argument: file_id") - body = {"chunking_strategy": chunking_strategy, "file_id": file_id} + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_id": file_id} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None @@ -5962,8 +5975,9 @@ def create_vector_store_file_batch( self, vector_store_id: str, *, - file_ids: List[str], content_type: str = "application/json", + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFileBatch: @@ -5971,11 +5985,13 @@ def create_vector_store_file_batch( :param vector_store_id: Identifier of the vector store. Required. :type vector_store_id: str - :keyword file_ids: List of file identifiers. Required. - :paramtype file_ids: list[str] :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. Default value is "application/json". :paramtype content_type: str + :keyword file_ids: List of file identifiers. Default value is None. + :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. 
:paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -6008,7 +6024,8 @@ def create_vector_store_file_batch( vector_store_id: str, body: Union[JSON, IO[bytes]] = _Unset, *, - file_ids: List[str] = _Unset, + file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, **kwargs: Any ) -> _models.VectorStoreFileBatch: @@ -6018,8 +6035,10 @@ def create_vector_store_file_batch( :type vector_store_id: str :param body: Is either a JSON type or a IO[bytes] type. Required. :type body: JSON or IO[bytes] - :keyword file_ids: List of file identifiers. Required. + :keyword file_ids: List of file identifiers. Default value is None. :paramtype file_ids: list[str] + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Default value is None. :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest @@ -6042,9 +6061,7 @@ def create_vector_store_file_batch( cls: ClsType[_models.VectorStoreFileBatch] = kwargs.pop("cls", None) if body is _Unset: - if file_ids is _Unset: - raise TypeError("missing required argument: file_ids") - body = {"chunking_strategy": chunking_strategy, "file_ids": file_ids} + body = {"chunking_strategy": chunking_strategy, "data_sources": data_sources, "file_ids": file_ids} body = {k: v for k, v in body.items() if v is not None} content_type = content_type or "application/json" _content = None diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index b50ec0594c39..62ad9f7bb06d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -1,5 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. @@ -2230,6 +2229,7 @@ def create_vector_store_and_poll( *, content_type: str = "application/json", file_ids: Optional[List[str]] = None, + data_sources: Optional[List[_models.VectorStoreDataSource]] = None, name: Optional[str] = None, expires_after: Optional[_models.VectorStoreExpirationPolicy] = None, chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None, @@ -2247,6 +2247,8 @@ def create_vector_store_and_poll( :paramtype file_ids: list[str] :keyword name: The name of the vector store. Default value is None. :paramtype name: str + :keyword data_sources: List of Azure assets. Default value is None. + :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource] :keyword expires_after: Details on when this vector store expires. Default value is None. :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy :keyword chunking_strategy: The chunking strategy used to chunk the file(s). 
If not set, will
@@ -2292,6 +2294,7 @@ def create_vector_store_and_poll(
         content_type: str = "application/json",
         file_ids: Optional[List[str]] = None,
         name: Optional[str] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
         expires_after: Optional[_models.VectorStoreExpirationPolicy] = None,
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
         metadata: Optional[Dict[str, str]] = None,
@@ -2307,6 +2310,8 @@
         :paramtype file_ids: list[str]
         :keyword name: The name of the vector store. Default value is None.
         :paramtype name: str
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
         :keyword expires_after: Details on when this vector store expires. Default value is None.
         :paramtype expires_after: ~azure.ai.projects.models.VectorStoreExpirationPolicy
         :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
@@ -2327,10 +2332,12 @@
 
         if body is not None:
             vector_store = self.create_vector_store(body=body, content_type=content_type, **kwargs)
-        elif file_ids is not None or (name is not None and expires_after is not None):
+        elif file_ids is not None or data_sources is not None or (name is not None and expires_after is not None):
+            store_configuration = _models.VectorStoreConfiguration(data_sources=data_sources) if data_sources else None
             vector_store = self.create_vector_store(
                 content_type=content_type,
                 file_ids=file_ids,
+                store_configuration=store_configuration,
                 name=name,
                 expires_after=expires_after,
                 chunking_strategy=chunking_strategy,
@@ -2340,7 +2347,7 @@
         else:
             raise ValueError(
                 "Invalid parameters for create_vector_store_and_poll. Please provide either 'body', "
-                "'file_ids', or 'name' and 'expires_after'."
+                "'file_ids', 'data_sources', or 'name' and 'expires_after'."
             )
 
         while vector_store.status == "in_progress":
@@ -2381,7 +2388,8 @@ def create_vector_store_file_batch_and_poll(
         self,
         vector_store_id: str,
         *,
-        file_ids: List[str],
+        file_ids: Optional[List[str]] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
         content_type: str = "application/json",
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
         sleep_interval: float = 1,
@@ -2393,6 +2401,9 @@
         :type vector_store_id: str
-        :keyword file_ids: List of file identifiers. Required.
+        :keyword file_ids: List of file identifiers. Default value is None.
         :paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
         :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
             Default value is "application/json".
         :paramtype content_type: str
@@ -2440,7 +2451,8 @@
         vector_store_id: str,
         body: Union[JSON, IO[bytes], None] = None,
         *,
-        file_ids: List[str] = _Unset,
+        file_ids: Optional[List[str]] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
         chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
         sleep_interval: float = 1,
         **kwargs: Any,
@@ -2453,9 +2465,14 @@
         :type body: JSON or IO[bytes]
-        :keyword file_ids: List of file identifiers. Required.
+        :keyword file_ids: List of file identifiers. Default value is None.
:paramtype file_ids: list[str]
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
         :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
             use the auto strategy. Default value is None.
         :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
         :return: VectorStoreFileBatch. The VectorStoreFileBatch is compatible with MutableMapping
         :rtype: ~azure.ai.projects.models.VectorStoreFileBatch
         :raises ~azure.core.exceptions.HttpResponseError:
@@ -2463,7 +2480,11 @@
 
         if body is None:
             vector_store_file_batch = super().create_vector_store_file_batch(
-                vector_store_id=vector_store_id, file_ids=file_ids, chunking_strategy=chunking_strategy, **kwargs
+                vector_store_id=vector_store_id,
+                file_ids=file_ids,
+                data_sources=data_sources,
+                chunking_strategy=chunking_strategy,
+                **kwargs,
             )
         else:
             content_type = kwargs.get("content_type", "application/json")
@@ -2585,6 +2606,146 @@ def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str
             logger.error(f"An error occurred in save_file: {e}")
             raise
 
+    @overload
+    def create_vector_store_file_and_poll(
+        self,
+        vector_store_id: str,
+        body: JSON,
+        *,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: JSON
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+            Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file_and_poll(
+        self,
+        vector_store_id: str,
+        *,
+        content_type: str = "application/json",
+        file_id: Optional[str] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+            Default value is "application/json".
+        :paramtype content_type: str
+        :keyword file_id: Identifier of the file. Default value is None.
+        :paramtype file_id: str
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+            use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @overload
+    def create_vector_store_file_and_poll(
+        self,
+        vector_store_id: str,
+        body: IO[bytes],
+        *,
+        content_type: str = "application/json",
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Required.
+        :type body: IO[bytes]
+        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+            Default value is "application/json".
+        :paramtype content_type: str
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile. The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+
+    @distributed_trace
+    def create_vector_store_file_and_poll(
+        self,
+        vector_store_id: str,
+        body: Union[JSON, IO[bytes], None] = None,
+        *,
+        file_id: Optional[str] = None,
+        data_sources: Optional[List[_models.VectorStoreDataSource]] = None,
+        chunking_strategy: Optional[_models.VectorStoreChunkingStrategyRequest] = None,
+        sleep_interval: float = 1,
+        **kwargs: Any,
+    ) -> _models.VectorStoreFile:
+        """Create a vector store file by attaching a file to a vector store.
+
+        :param vector_store_id: Identifier of the vector store. Required.
+        :type vector_store_id: str
+        :param body: Is either a JSON type or an IO[bytes] type. Default value is None.
+        :type body: JSON or IO[bytes] or None
+        :keyword file_id: Identifier of the file. Default value is None.
+        :paramtype file_id: str
+        :keyword data_sources: List of Azure assets. Default value is None.
+        :paramtype data_sources: list[~azure.ai.projects.models.VectorStoreDataSource]
+        :keyword chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will
+            use the auto strategy. Default value is None.
+        :paramtype chunking_strategy: ~azure.ai.projects.models.VectorStoreChunkingStrategyRequest
+        :keyword sleep_interval: Time to wait before polling for the status of the vector store. Default value
+            is 1.
+        :paramtype sleep_interval: float
+        :return: VectorStoreFile.
The VectorStoreFile is compatible with MutableMapping
+        :rtype: ~azure.ai.projects.models.VectorStoreFile
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        if body is None:
+            vector_store_file = super().create_vector_store_file(
+                vector_store_id=vector_store_id,
+                file_id=file_id,
+                data_sources=data_sources,
+                chunking_strategy=chunking_strategy,
+                **kwargs,
+            )
+        else:
+            content_type = kwargs.get("content_type", "application/json")
+            vector_store_file = super().create_vector_store_file(body=body, content_type=content_type, **kwargs)
+
+        while vector_store_file.status == "in_progress":
+            time.sleep(sleep_interval)
+            vector_store_file = super().get_vector_store_file(
+                vector_store_id=vector_store_id, file_id=vector_store_file.id
+            )
+
+        return vector_store_file
+
 
 __all__: List[str] = [
     "AgentsOperations",
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
index d2557ab74a23..1cb89d70ac56 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py
@@ -301,7 +301,7 @@ def _add_message_event(
     message_id: Optional[str] = None,
     thread_run_id: Optional[str] = None,
     message_status: Optional[str] = None,
-    incomplete_details: Optional[MessageIncompleteDetails] = None,
+    incomplete_details: Optional[str] = None,
     usage: Optional[_models.RunStepCompletionUsage] = None,
 ) -> None:
     # TODO document new fields
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py
new file mode 100644
index 000000000000..d3c36363d4ba
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py
@@ -0,0 +1,86 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+"""
+FILE: sample_agents_code_interpreter_attachment_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import CodeInterpreterTool
+from azure.ai.projects.models import FilePurpose
+from azure.ai.projects.models import MessageAttachment
+from azure.identity.aio import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
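The synchronous helper above mirrors the async one: it forwards `file_id`/`data_sources` when no `body` is given, then polls `get_vector_store_file` until the file leaves the `in_progress` state. A minimal usage sketch, assuming a hypothetical connection string and an already uploaded file ID (both placeholders):

```python
import os

from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

with AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
) as project_client:
    vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_store")
    # "assistant-file-id" is a hypothetical ID returned by an earlier file upload.
    vs_file = project_client.agents.create_vector_store_file_and_poll(
        vector_store_id=vector_store.id, file_id="assistant-file-id"
    )
    print(f"Vector store file status: {vs_file.status}")
```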
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to the Azure subscription via the Azure CLI and set the environment variables
+async def main():
+    project_client = AIProjectClient.from_connection_string(
+        credential=DefaultAzureCredential(), conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+        # upload a file and wait for it to be processed
+        file = await project_client.agents.upload_file_and_poll(
+            file_path="../product_info_1.md", purpose=FilePurpose.AGENTS
+        )
+        print(f"Uploaded file, file ID: {file.id}")
+
+        code_interpreter = CodeInterpreterTool()
+
+        # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=code_interpreter.definitions,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        # create a message with the attachment
+        attachment = MessageAttachment(file_id=file.id, tools=code_interpreter.definitions)
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Run finished with status: {run.status}")
+
+        if run.status == "failed":
+            # If you got "Rate limit is exceeded.", you may need to request more quota
+            print(f"Run failed: {run.last_error}")
+
+        await project_client.agents.delete_file(file.id)
+        print("Deleted file")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py
new file mode 100644
index 000000000000..5dfc0432ed0c
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py
@@ -0,0 +1,88 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_code_interpreter_attachment_enterprise_search_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter from
+    the Azure Agents service using an asynchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment_enterprise_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+""" +import asyncio +import os + +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import ( + CodeInterpreterTool, + MessageAttachment, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + + +async def main(): + credential = DefaultAzureCredential() + project_client = AIProjectClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] + ) + + async with project_client: + + code_interpreter = CodeInterpreterTool() + + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = await project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = await project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # We will upload the local file to Azure and will use it for vector store creation. + _, asset_uri = project_client.upload_file("../product_info_1.md") + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # create a message with the attachment + attachment = MessageAttachment(data_sources=[ds], tools=code_interpreter.definitions) + message = await project_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + await project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = await project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py new file mode 100644 index 000000000000..afaf45fa5bf8 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -0,0 +1,108 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +FILE: sample_agents_vector_store_batch_enterprise_file_search_async.py + +DESCRIPTION: + This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from + the Azure Agents service using a synchronous client. 
+
+USAGE:
+    python sample_agents_vector_store_batch_enterprise_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity azure-ai-ml
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity.aio import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to the Azure subscription via the Azure CLI and set the environment variables
+
+    credential = DefaultAzureCredential()
+    project_client = AIProjectClient.from_connection_string(
+        credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+
+        # We will upload the local file to Azure and will use it for vector store creation.
+        _, asset_uri = project_client.upload_file("../product_info_1.md")
+        ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+        vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+        print(f"Created vector store, vector store ID: {vector_store.id}")
+
+        # add the file to the vector store; alternatively, you can supply file IDs at vector store creation
+        vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll(
+            vector_store_id=vector_store.id, data_sources=[ds]
+        )
+        print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+        # create a file search tool
+        file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+        # note that FileSearchTool must be added as a tool, together with its tool_resources, or the agent will be unable to search the file
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=file_search_tool.definitions,
+            tool_resources=file_search_tool.resources,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        file_search_tool.remove_vector_store(vector_store.id)
+        print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
+
+        await project_client.agents.update_agent(
+            assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources
+        )
+        print(f"Updated agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        await project_client.agents.delete_vector_store(vector_store.id)
+        print("Deleted vector store")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py
new file mode 100644
index 000000000000..16f036e1a95a
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py
@@ -0,0 +1,83 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_vector_store_enterprise_file_search_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to add files to an agent during vector store creation.
+
+USAGE:
+    python sample_agents_vector_store_enterprise_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity azure-ai-ml
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+    credential = DefaultAzureCredential()
+    project_client = AIProjectClient.from_connection_string(
+        credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+
+        # We will upload the local file to Azure and will use it for vector store creation.
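+        # NOTE: upload_file appears to return a (data_asset_id, asset_uri) tuple; only the asset
+        # URI is kept here, since that is what VectorStoreDataSource takes as its asset_identifier.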
+        _, asset_uri = await project_client.upload_file("../product_info_1.md")
+        ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+        vector_store = await project_client.agents.create_vector_store_and_poll(
+            data_sources=[ds], name="sample_vector_store"
+        )
+        print(f"Created vector store, vector store ID: {vector_store.id}")
+
+        # create a file search tool
+        file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+        # note that the FileSearchTool must be passed via tools and tool_resources, otherwise the agent will be unable to search the file
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=file_search_tool.definitions,
+            tool_resources=file_search_tool.resources,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        await project_client.agents.delete_vector_store(vector_store.id)
+        print("Deleted vector store")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py
new file mode 100644
index 000000000000..f06151be4359
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py
@@ -0,0 +1,85 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_vector_store_file_search_async.py
+
+DESCRIPTION:
+    This sample demonstrates how to add files to an agent during vector store creation.
+
+USAGE:
+    python sample_agents_vector_store_file_search_async.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+import asyncio
+import os
+
+from azure.ai.projects.aio import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+
+async def main():
+    # Create an Azure AI Client from a connection string, copied from your AI Studio project.
+    # At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+    # Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+    credential = DefaultAzureCredential()
+    project_client = AIProjectClient.from_connection_string(
+        credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+    )
+
+    async with project_client:
+
+        # upload a file and wait for it to be processed
+        file = await project_client.agents.upload_file_and_poll(
+            file_path="../product_info_1.md", purpose=FilePurpose.AGENTS
+        )
+        print(f"Uploaded file, file ID: {file.id}")
+
+        # create a vector store with the uploaded file and wait for it to be processed
+        vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store")
+        print(f"Created vector store, vector store ID: {vector_store.id}")
+
+        # create a file search tool
+        file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+        # note that the FileSearchTool must be passed via tools and tool_resources, otherwise the agent will be unable to search the file
+        agent = await project_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are a helpful assistant",
+            tools=file_search_tool.definitions,
+            tool_resources=file_search_tool.resources,
+        )
+        print(f"Created agent, agent ID: {agent.id}")
+
+        thread = await project_client.agents.create_thread()
+        print(f"Created thread, thread ID: {thread.id}")
+
+        message = await project_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+        )
+        print(f"Created message, message ID: {message.id}")
+
+        run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        print(f"Created run, run ID: {run.id}")
+
+        await project_client.agents.delete_vector_store(vector_store.id)
+        print("Deleted vector store")
+
+        await project_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+
+        messages = await project_client.agents.list_messages(thread_id=thread.id)
+        print(f"Messages: {messages}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py
new file mode 100644
index 000000000000..922bdff36b30
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py
@@ -0,0 +1,81 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_code_interpreter_attachment_enterprise_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with code interpreter from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_code_interpreter_attachment_enterprise_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
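+
+    NOTE: this sample uploads a local file with project_client.upload_file; as in the other
+    enterprise file search samples, that upload path may also require the azure-ai-ml package.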
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import ( + CodeInterpreterTool, + MessageAttachment, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +credential = DefaultAzureCredential() +project_client = AIProjectClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + code_interpreter = CodeInterpreterTool() + + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + # We will upload the local file to Azure and will use it for vector store creation. + _, asset_uri = project_client.upload_file("./product_info_1.md") + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + + # create a message with the attachment + attachment = MessageAttachment(data_sources=[ds], tools=code_interpreter.definitions) + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + # Check if you got "Rate limit is exceeded.", then you want to get more quota + print(f"Run failed: {run.last_error}") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py new file mode 100644 index 000000000000..d06618c8e7c7 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py @@ -0,0 +1,78 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +""" +FILE: sample_agents_enterprise_file_search.py + +DESCRIPTION: + This sample demonstrates how to add files to agent during the vector store creation. + +USAGE: + python sample_agents_enterprise_file_search.py + + Before running the sample: + + pip install azure.ai.projects azure-identity azure-ai-ml + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
+""" + +import os +from azure.ai.projects import AIProjectClient +from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.identity import DefaultAzureCredential + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +credential = DefaultAzureCredential() +project_client = AIProjectClient.from_connection_string( + credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"] +) + +with project_client: + + # We will upload the local file to Azure and will use it for vector store creation. + _, asset_uri = project_client.upload_file("./product_info_1.md") + + # create a vector store with no file and wait for it to be processed + ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + vector_store = project_client.agents.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") + print(f"Created vector store, vector store ID: {vector_store.id}") + + # create a file search tool + file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + + # notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, + ) + print(f"Created agent, agent ID: {agent.id}") + + thread = project_client.agents.create_thread() + print(f"Created thread, thread ID: {thread.id}") + + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + ) + print(f"Created message, message ID: {message.id}") + + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Created run, run ID: {run.id}") + + project_client.agents.delete_vector_store(vector_store.id) + print("Deleted vectore store") + + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index c4e7ecf71f5a..ca53f180e32e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -12,7 +12,7 @@ View the results in the "Tracing" tab in your Azure AI Studio project page. 
 
 USAGE:
-    python sample_agents_basics_with_azure_monitor_tracing.py
+    python sample_agents_functions_with_azure_monitor_tracing.py
 
     Before running the sample:
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py
index a82adb3823b6..031314cfa944 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py
@@ -11,7 +11,7 @@
     the Azure Agents service using a synchronous client with tracing to console.
 
 USAGE:
-    python sample_agents_basics_with_console_tracing.py
+    python sample_agents_functions_with_console_tracing.py
 
     Before running the sample:
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py
new file mode 100644
index 000000000000..e84648f06175
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py
@@ -0,0 +1,103 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_vector_store_batch_enterprise_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to create a vector store and add files to it in a batch.
+
+USAGE:
+    python sample_agents_vector_store_batch_enterprise_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity azure-ai-ml
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+credential = DefaultAzureCredential()
+project_client = AIProjectClient.from_connection_string(
+    credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+
+    # We will upload the local file to Azure and will use it for vector store creation.
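+    # The asset URI returned by upload_file identifies the uploaded blob as a project data
+    # asset; it is wrapped in a VectorStoreDataSource and batch-added to the vector store below.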
+    _, asset_uri = project_client.upload_file("./product_info_1.md")
+
+    # create a vector store with no file and wait for it to be processed
+    vector_store = project_client.agents.create_vector_store_and_poll(data_sources=[], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET)
+    # add the file to the vector store, or you can supply data sources at vector store creation
+    vector_store_file_batch = project_client.agents.create_vector_store_file_batch_and_poll(
+        vector_store_id=vector_store.id, data_sources=[ds]
+    )
+    print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}")
+
+    # create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # note that the FileSearchTool must be passed via tools and tool_resources, otherwise the agent will be unable to search the file
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, run ID: {run.id}")
+
+    file_search_tool.remove_vector_store(vector_store.id)
+    print(f"Removed vector store from file search, vector store ID: {vector_store.id}")
+
+    project_client.agents.update_agent(
+        assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources
+    )
+    print(f"Updated agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, run ID: {run.id}")
+
+    project_client.agents.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py
index d3d83045876a..581deaf48cc3 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_file_search.py
@@ -2,16 +2,15 @@
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # ------------------------------------
-
 """
-FILE: sample_agents_vector_store_batch_file_search_async.py
+FILE: sample_agents_vector_store_batch_file_search.py
 
 DESCRIPTION:
     This sample demonstrates how to use agent operations to add files to an existing vector store and perform search from
     the Azure Agents service using a synchronous client.
 
 USAGE:
-    python sample_agents_vector_store_batch_file_search_async.py
+    python sample_agents_vector_store_batch_file_search.py
 
     Before running the sample:
 
@@ -42,7 +41,7 @@
     print(f"Uploaded file, file ID: {file.id}")
 
     # create a vector store with no file and wait for it to be processed
-    vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+    vector_store = project_client.agents.create_vector_store_and_poll(data_sources=[], name="sample_vector_store")
     print(f"Created vector store, vector store ID: {vector_store.id}")
 
     # add the file to the vector store or you can supply file ids in the vector store creation
@@ -96,9 +95,6 @@
     run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
     print(f"Created run, run ID: {run.id}")
 
-    project_client.agents.delete_file(file.id)
-    print("Deleted file")
-
     project_client.agents.delete_vector_store(vector_store.id)
     print("Deleted vector store")
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py
new file mode 100644
index 000000000000..a019fd96daf0
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py
@@ -0,0 +1,79 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+"""
+FILE: sample_agents_vector_store_file_search.py
+
+DESCRIPTION:
+    This sample demonstrates how to add files to an agent during vector store creation.
+
+USAGE:
+    python sample_agents_vector_store_file_search.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set these environment variables with your own values:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+
+from azure.ai.projects import AIProjectClient
+from azure.ai.projects.models import FileSearchTool, FilePurpose
+from azure.identity import DefaultAzureCredential
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# Customers need to log in to their Azure subscription via the Azure CLI and set the environment variables
+
+credential = DefaultAzureCredential()
+project_client = AIProjectClient.from_connection_string(
+    credential=credential, conn_str=os.environ["PROJECT_CONNECTION_STRING"]
+)
+
+with project_client:
+
+    # upload a file and wait for it to be processed
+    file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose=FilePurpose.AGENTS)
+    print(f"Uploaded file, file ID: {file.id}")
+
+    # create a vector store with the uploaded file and wait for it to be processed
+    vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="sample_vector_store")
+    print(f"Created vector store, vector store ID: {vector_store.id}")
+
+    # create a file search tool
+    file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id])
+
+    # note that the FileSearchTool must be passed via tools and tool_resources, otherwise the agent will be unable to search the file
+    agent = project_client.agents.create_agent(
+        model="gpt-4-1106-preview",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=file_search_tool.definitions,
+        tool_resources=file_search_tool.resources,
+    )
+    print(f"Created agent, agent ID: {agent.id}")
+
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, thread ID: {thread.id}")
+
+    message = project_client.agents.create_message(
+        thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?"
+    )
+    print(f"Created message, message ID: {message.id}")
+
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Created run, run ID: {run.id}")
+
+    project_client.agents.delete_vector_store(vector_store.id)
+    print("Deleted vector store")
+
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")
diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py
index 0ead6693e295..ee490f846797 100644
--- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py
@@ -37,7 +37,7 @@ async def main():
     )
 
     # Upload data for evaluation
-    data_id = project_client.upload_file("./evaluate_test_data.jsonl")
+    data_id, _ = project_client.upload_file("./evaluate_test_data.jsonl")
 
     default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
 
diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py
index 223968489d63..435e43875821 100644
--- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py
+++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py
@@ -37,7 +37,7 @@
 )
 
 # Upload data for evaluation
-data_id = project_client.upload_file("./evaluate_test_data.jsonl")
+data_id, _ = project_client.upload_file("./evaluate_test_data.jsonl")
 
 default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI)
 
diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md
index 704a2cd723ea..acda18e41581 100644
--- a/sdk/ai/azure-ai-projects/tests/README.md
+++ b/sdk/ai/azure-ai-projects/tests/README.md
@@ -29,10 +29,10 @@ az login
 ```
 
 ## Setup for running tests in the `agents` folder
+**Note:** The environment variables required by the tests are defined in `agentClientPreparer`. **It is important that the project name be part of each environment variable name!** For example, if the project is `azure_ai_projects`, a variable may be called `azure_ai_projects_connection_string`. Variables whose names do not contain the `azure_ai_projects` substring are ignored by `EnvironmentVariableLoader`. The values of these variables are supplied to the kwargs of unit tests decorated by `EnvironmentVariableLoader`.
 
 ```bash
-set PROJECT_CONNECTION_STRING_AGENTS_TESTS=
 set AZURE_AI_PROJECTS_CONNECTION_STRING=
 set AZURE_AI_PROJECTS_DATA_PATH=
 ```
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
index e4683958e440..093245923d15 100644
--- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py
@@ -1,35 +1,48 @@
 # pylint: disable=too-many-lines
 #
 # ------------------------------------
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.
 # ------------------------------------
 # cSpell:disable
+from typing import Optional
+
 import os
+import datetime
 import json
+import logging
 import tempfile
+import sys
 import time
+import pytest
 import functools
-import datetime
-import logging
-import sys
 from azure.ai.projects import AIProjectClient
+from azure.core.pipeline.transport import RequestsTransport
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
 from azure.ai.projects.models import (
-    FunctionTool,
     CodeInterpreterTool,
-    FileSearchTool,
-    ToolSet,
     CodeInterpreterToolResource,
+    FilePurpose,
+    FileSearchTool,
     FileSearchToolResource,
-    ToolResources,
+    FunctionTool,
+    MessageAttachment,
     OpenAIFile,
-    FilePurpose,
+    ThreadMessageOptions,
+    ToolResources,
+    ToolSet,
+    VectorStore,
+    VectorStoreAzureConfigurations,
+    VectorStorageConfiguration,
+    VectorStorageDataSource,
+    VectorStorageDataSourceAssetType,
 )
-from azure.core.pipeline.transport import RequestsTransport
-from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
-from azure.core.exceptions import AzureError, ServiceRequestError, HttpResponseError
-from azure.ai.projects.models import FunctionTool
-from azure.identity import DefaultAzureCredential
+
 
 # TODO clean this up / get rid of anything not in use
@@ -57,9 +70,9 @@
 agentClientPreparer = functools.partial(
     EnvironmentVariableLoader,
-    "azure_ai_project",
-    # cSpell:disable-next-line
+    "azure_ai_projects",
     azure_ai_projects_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
+    azure_ai_projects_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md",
 )
 """
 agentClientPreparer = functools.partial(
@@ -119,6 +132,10 @@ def create_client(self, **kwargs):
 
         return client
 
+    def _get_data_file(self) -> str:
"""Return the test file name.""" + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ # NOTE: this test should not be run against a shared resource, as it will delete all agents @@ -1171,6 +1188,468 @@ def test_create_agent_with_invalid_file_search_tool_resource(self, **kwargs): == "Tools must contain a FileSearchToolDefinition when tool_resources.file_search is provided" ) + @agentClientPreparer() + @recorded_by_proxy + def test_create_vector_store_azure(self, **kwargs): + """Test the agent with vector store creation.""" + self._do_test_create_vector_store(**kwargs) + + @agentClientPreparer() + @recorded_by_proxy + def test_create_vector_store_file_id(self, **kwargs): + """Test the agent with vector store creation.""" + self._do_test_create_vector_store(file_path=self._get_data_file(), **kwargs) + + def _do_test_create_vector_store(self, **kwargs): + """Test the agent with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + file_id = self._get_file_id_maybe(ai_client, **kwargs) + file_ids = [file_id] if file_id else None + if file_ids: + ds = None + else: + ds = [ + VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = ai_client.agents.create_vector_store_and_poll( + file_ids=file_ids, data_sources=ds, name="my_vectorstore" + ) + assert vector_store.id + self._test_file_search(ai_client, vector_store, file_id) + + @agentClientPreparer() + @recorded_by_proxy + def test_vector_store_threads_file_search_azure(self, **kwargs): + """Test file search when azure asset ids are sopplied during thread creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + ds = [ + VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreAzureConfigurations( + store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + ) + ] + ) + file_search = FileSearchTool() + agent = ai_client.agents.create_agent( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert agent.id + + thread = ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) + assert thread.id + # create message + message = ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        ai_client.agents.delete_agent(agent.id)
+        ai_client.close()
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_file_id(self, **kwargs):
+        """Test adding a single file to the vector store with a file ID."""
+        self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs)
+
+    @agentClientPreparer()
+    @pytest.mark.skip("The CreateVectorStoreFile API is not supported yet.")
+    @recorded_by_proxy
+    def test_create_vector_store_add_file_azure(self, **kwargs):
+        """Test adding a single file to the vector store with an azure asset ID."""
+        self._do_test_create_vector_store_add_file(**kwargs)
+
+    def _do_test_create_vector_store_add_file(self, **kwargs):
+        """Test adding a single file to the vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+        if file_id:
+            ds = None
+        else:
+            ds = [VectorStorageDataSource(storage_uri=kwargs["azure_ai_projects_data_path"], asset_type="uri_asset")]
+        vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+        assert vector_store.id
+        vector_store_file = ai_client.agents.create_vector_store_file(
+            vector_store_id=vector_store.id, data_sources=ds, file_id=file_id
+        )
+        assert vector_store_file.id
+        self._test_file_search(ai_client, vector_store, file_id)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_vector_store_batch_file_ids(self, **kwargs):
+        """Test adding multiple files to the vector store with file IDs."""
+        self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs)
+
+    @agentClientPreparer()
+    @pytest.mark.skip("The CreateFileBatch API is not supported yet.")
+    @recorded_by_proxy
+    def test_create_vector_store_batch_azure(self, **kwargs):
+        """Test adding multiple files to the vector store with azure asset IDs."""
+        self._do_test_create_vector_store_batch(**kwargs)
+
+    def _do_test_create_vector_store_batch(self, **kwargs):
+        """Test adding multiple files to the vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+        if file_id:
+            file_ids = [file_id]
+            ds = None
+        else:
+            file_ids = None
+            ds = [
+                VectorStorageDataSource(
+                    storage_uri=kwargs["azure_ai_projects_data_path"],
+                    asset_type=VectorStorageDataSourceAssetType.URI_ASSET,
+                )
+            ]
+        vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
+        assert vector_store.id
+        vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll(
+            vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids
+        )
+        assert vector_store_file_batch.id
+        self._test_file_search(ai_client, vector_store, file_id)
+
+    def _test_file_search(
+        self,
+        ai_client: AIProjectClient,
+        vector_store: VectorStore,
+        file_id: Optional[str],
+    ) -> None:
+        """Test the file search."""
+        file_search = FileSearchTool(vector_store_ids=[vector_store.id])
+        agent = ai_client.agents.create_agent(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+            tool_resources=file_search.resources,
+        )
+        assert agent.id
+
+        thread = ai_client.agents.create_thread()
+        assert thread.id
+
+        # create message
+        message = ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        ai_client.agents.delete_vector_store(vector_store.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        ai_client.agents.delete_agent(agent.id)
+        self._remove_file_maybe(file_id, ai_client)
+        ai_client.close()
+
+    @agentClientPreparer()
+    @pytest.mark.skip("The CreateFileBatch API is not supported yet.")
+    @recorded_by_proxy
+    def test_message_attachement_azure(self, **kwargs):
+        """Test message attachment with an azure asset ID."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        self._do_test_message_attachment(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_message_attachement_file_ids(self, **kwargs):
+        """Test message attachment with a file ID."""
+        self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs)
+
+    def _do_test_message_attachment(self, **kwargs):
+        """Test the agent with a message attachment."""
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+
+        # Create agent (tools are supplied via the message attachment below)
+        agent = ai_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+        )
+        assert agent.id, "Agent was not created"
+
+        thread = ai_client.agents.create_thread()
+        assert thread.id, "The thread was not created."
+
+        # Create a message with the file search attachment
+        # Notice that vector store is created temporarily when using attachments with a default expiration policy of seven days.
+        attachment = MessageAttachment(
+            file_id=file_id,
+            data_sources=kwargs.get("data_sources"),
+            tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]],
+        )
+        message = ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+        )
+        assert message.id, "The message was not created."
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id, "The run was not created."
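+        # Clean up the uploaded file (if any) now that the run has been created.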
+        self._remove_file_maybe(file_id, ai_client)
+        ai_client.agents.delete_agent(agent.id)
+
+        messages = ai_client.agents.list_messages(thread_id=thread.id)
+        assert len(messages), "No messages were created"
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_assistant_with_interpreter_azure(self, **kwargs):
+        """Test creating an assistant with code interpreter using azure asset IDs."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_assistant_with_interpreter_file_ids(self, **kwargs):
+        """Test creating an assistant with code interpreter using file IDs."""
+        self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs)
+
+    def _do_test_create_assistant_with_interpreter(self, **kwargs):
+        """Test creating an assistant with code interpreter and a project asset ID."""
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        code_interpreter = CodeInterpreterTool()
+
+        file_id = None
+        if "file_path" in kwargs:
+            file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS)
+            assert file.id, "The file was not uploaded."
+            file_id = file.id
+
+        cdr = CodeInterpreterToolResource(
+            file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources")
+        )
+        tr = ToolResources(code_interpreter=cdr)
+        # note that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment
+        agent = ai_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are helpful assistant",
+            tools=code_interpreter.definitions,
+            tool_resources=tr,
+        )
+        assert agent.id, "Agent was not created"
+
+        thread = ai_client.agents.create_thread()
+        assert thread.id, "The thread was not created."
+
+        message = ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id, "The run was not created."
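+        # Remove the uploaded file before checking the run status, so it is cleaned up even if
+        # the run did not complete.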
+        self._remove_file_maybe(file_id, ai_client)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        ai_client.agents.delete_agent(agent.id)
+        assert len(ai_client.agents.list_messages(thread_id=thread.id)), "No messages were created"
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_thread_with_interpreter_azure(self, **kwargs):
+        """Test creating a thread with code interpreter using azure asset IDs."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_thread_with_interpreter_file_ids(self, **kwargs):
+        """Test creating a thread with code interpreter using file IDs."""
+        self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs)
+
+    def _do_test_create_thread_with_interpreter(self, **kwargs):
+        """Test creating a thread with code interpreter and a project asset ID."""
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        code_interpreter = CodeInterpreterTool()
+
+        file_id = None
+        if "file_path" in kwargs:
+            file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS)
+            assert file.id, "The file was not uploaded."
+            file_id = file.id
+
+        cdr = CodeInterpreterToolResource(
+            file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources")
+        )
+        tr = ToolResources(code_interpreter=cdr)
+        # note that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment
+        agent = ai_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="You are helpful assistant",
+            tools=code_interpreter.definitions,
+        )
+        assert agent.id, "Agent was not created"
+
+        thread = ai_client.agents.create_thread(tool_resources=tr)
+        assert thread.id, "The thread was not created."
+
+        message = ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id, "The run was not created."
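+        # As above, clean up the uploaded file first so a failed run does not leave it behind.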
+        self._remove_file_maybe(file_id, ai_client)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        ai_client.agents.delete_agent(agent.id)
+        messages = ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_assistant_with_inline_vs_azure(self, **kwargs):
+        """Test creation of an assistant with an inline vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        ds = [
+            VectorStorageDataSource(
+                storage_uri=kwargs["azure_ai_projects_data_path"],
+                asset_type=VectorStorageDataSourceAssetType.URI_ASSET,
+            )
+        ]
+        fs = FileSearchToolResource(
+            vector_stores=[
+                VectorStoreAzureConfigurations(
+                    store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds)
+                )
+            ]
+        )
+        file_search = FileSearchTool()
+        agent = ai_client.agents.create_agent(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+            tool_resources=ToolResources(file_search=fs),
+        )
+        assert agent.id
+
+        thread = ai_client.agents.create_thread()
+        assert thread.id
+        # create message
+        message = ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        ai_client.agents.delete_agent(agent.id)
+        ai_client.close()
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_attachment_in_thread_azure(self, **kwargs):
+        """Create a thread with an inline message attachment using azure asset IDs."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy
+    def test_create_attachment_in_thread_file_ids(self, **kwargs):
+        """Create a thread with an inline message attachment using file IDs."""
+        self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs)
+
+    def _do_test_create_attachment_in_thread_azure(self, **kwargs):
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = self._get_file_id_maybe(ai_client, **kwargs)
+
+        file_search = FileSearchTool()
+        agent = ai_client.agents.create_agent(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+        )
+        assert agent.id
+
+        # create message
+        attachment = MessageAttachment(
+            file_id=file_id,
+            data_sources=kwargs.get("data_sources"),
+            tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]],
+        )
+        message = ThreadMessageOptions(role="user", content="What does the attachment say?", attachments=[attachment])
+        thread = ai_client.agents.create_thread(messages=[message])
+        assert thread.id
+
+        run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        ai_client.agents.delete_agent(agent.id)
+        ai_client.close()
+
+    def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> Optional[str]:
+        """Return the file ID if kwargs has a file path."""
+        if "file_path" in kwargs:
+            file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS)
+            assert file.id, "The file was not uploaded."
+            return file.id
+        return None
+
+    def _remove_file_maybe(self, file_id: Optional[str], ai_client: AIProjectClient) -> None:
+        """Remove the file if we have a file ID."""
+        if file_id:
+            ai_client.agents.delete_file(file_id)
+
     @agentClientPreparer()
     @recorded_by_proxy
     def test_code_interpreter_and_save_file(self, **kwargs):
@@ -1178,7 +1657,6 @@ def test_code_interpreter_and_save_file(self, **kwargs):
         # create client
         with self.create_client(**kwargs) as client:
-
             file: OpenAIFile = None
 
             with tempfile.TemporaryDirectory() as temp_dir:
@@ -1188,7 +1666,9 @@ def test_code_interpreter_and_save_file(self, **kwargs):
                 with open(test_file_path, "w") as f:
                     f.write("This is a test file")
 
-                file = client.agents.upload_file_and_poll(file_path=test_file_path, purpose=FilePurpose.AGENTS)
+                file: OpenAIFile = client.agents.upload_file_and_poll(
+                    file_path=test_file_path, purpose=FilePurpose.AGENTS
+                )
 
                 # create agent
                 code_interpreter = CodeInterpreterTool(file_ids=[file.id])
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py
new file mode 100644
index 000000000000..24ebd1567982
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py
@@ -0,0 +1,1628 @@
+# pylint: disable=too-many-lines
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+import datetime
+import functools
+import json
+import logging
+import os
+import pytest
+import sys
+import time
+
+from azure.ai.projects.aio import AIProjectClient
+from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader
+from devtools_testutils.aio import recorded_by_proxy_async
+from azure.ai.projects.models import (
+    CodeInterpreterTool,
+    CodeInterpreterToolResource,
+    FilePurpose,
+    FileSearchTool,
+    FileSearchToolResource,
+    FunctionTool,
+    MessageAttachment,
+    ThreadMessageOptions,
+    ToolResources,
+    ToolSet,
+    VectorStore,
+    VectorStoreAzureConfigurations,
+    VectorStorageConfiguration,
+    VectorStorageDataSource,
+    VectorStorageDataSourceAssetType,
+)
+
+
+# TODO clean this up / get rid of anything not in use
+
+"""
+issues I've noticed with the code:
+    delete_thread(thread.id) fails
+    cancel_thread(thread.id) expires/times out occasionally
+    added time.sleep() to the beginning of my last few tests to avoid limits
+    when using the endpoint from Howie, delete_agent(agent.id) did not work but would not cause an error
+"""
+
+# Set to True to enable SDK logging
+LOGGING_ENABLED = True
+
+if LOGGING_ENABLED:
+    # Create a logger for the 'azure' SDK
+    # See https://docs.python.org/3/library/logging.html
+    logger = logging.getLogger("azure")
+    logger.setLevel(logging.DEBUG)  # INFO or DEBUG
+
+    # Configure a console output
+    handler = logging.StreamHandler(stream=sys.stdout)
+    logger.addHandler(handler)
+
+
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    "azure_ai_projects",
+    azure_ai_projects_connection_string="https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm",
+    azure_ai_projects_data_path="azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/rg-resour-cegr-oupfoo1/workspaces/abcd-abcdabcdabcda-abcdefghijklm/datastores/workspaceblobstore/paths/LocalUpload/000000000000/product_info_1.md",
+)
+"""
+agentClientPreparer = functools.partial(
+    EnvironmentVariableLoader,
+    'azure_ai_project',
+    azure_ai_project_host_name="https://foo.bar.some-domain.ms",
+    azure_ai_project_subscription_id="00000000-0000-0000-0000-000000000000",
+    azure_ai_project_resource_group_name="rg-resour-cegr-oupfoo1",
+    azure_ai_project_workspace_name="abcd-abcdabcdabcda-abcdefghijklm",
+)
+"""
+
+
+# create tool for agent use
+def fetch_current_datetime_live():
+    """
+    Get the current time as a JSON string.
+
+    :return: The current time as a JSON string.
+    :rtype: str
+    """
+    current_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time_json = json.dumps({"current_time": current_datetime})
+    return time_json
+
+
+# create tool for agent use
+def fetch_current_datetime_recordings():
+    """
+    Get the current time as a JSON string.
+
+    :return: Static time string so that test recordings work.
+    :rtype: str
+    """
+    time_json = json.dumps({"current_time": "2024-10-10 12:30:19"})
+    return time_json
+
+
+# Statically defined user functions for fast reference
+user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings}
+user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live}
+
+
+# The test class name needs to start with "Test" to get collected by pytest
+class TestagentClientAsync(AzureRecordedTestCase):
+
+    # helper function: create client using environment variables
+    def create_client(self, **kwargs):
+        # fetch environment variables
+        connection_string = kwargs.pop("azure_ai_projects_connection_string")
+        credential = self.get_credential(AIProjectClient, is_async=True)
+
+        # create and return client
+        client = AIProjectClient.from_connection_string(
+            credential=credential,
+            conn_str=connection_string,
+        )
+
+        return client
+
+    def _get_data_file(self) -> str:
+        """Return the path to the test data file."""
+        return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md")
+
+    # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list
+    """
+    # NOTE: this test should not be run against a shared resource, as it will delete all agents
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_clear_client(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # clear agent list
+        agents = client.agents.list_agents().data
+        for agent in agents:
+            client.agents.delete_agent(agent.id)
+        assert client.agents.list_agents().data.__len__() == 0
+
+        # close client
+        client.close()
+    """
+
+    # # **********************************************************************************
+    # #
+    # #                                     UNIT TESTS
+    # #
+    # # **********************************************************************************
+
+    # # **********************************************************************************
+    # #
+    # #                      HAPPY PATH SERVICE TESTS - agent APIs
+    # #
+    # # **********************************************************************************
+
+    # test client creation
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_client(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # close client
+        await client.close()
+
+    # test agent creation and deletion
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_delete_agent(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+        print("Created client")
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # test agent creation with tools
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_agent_with_tools(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # initialize agent functions
+        functions = FunctionTool(functions=user_functions_recording)
+
+        # create agent with tools
+        agent = await client.agents.create_agent(
+            model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions
+        )
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+        assert agent.tools
+        assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+        print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_update_agent(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+
+        # update agent and confirm changes went through
+        agent = await client.agents.update_agent(
+            assistant_id=agent.id, name="my-agent2", instructions="You are helpful agent"
+        )
+        assert agent.name
+        assert agent.name == "my-agent2"
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    """
+    DISABLED: can't perform consistently on shared resource
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_agent_list(self, **kwargs):
+        # create client and ensure there are no previous agents
+        client = self.create_client(**kwargs)
+        list_length = client.agents.list_agents().data.__len__()
+
+        # create agent and check that it appears in the list
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert client.agents.list_agents().data.__len__() == list_length + 1
+        assert client.agents.list_agents().data[0].id == agent.id
+
+        # create second agent and check that it appears in the list
+        agent2 = client.agents.create_agent(model="gpt-4o", name="my-agent2", instructions="You are helpful agent")
+        assert client.agents.list_agents().data.__len__() == list_length + 2
+        assert client.agents.list_agents().data[0].id == agent.id or client.agents.list_agents().data[1].id == agent.id
+
+        # delete agents and check list
+        client.agents.delete_agent(agent.id)
+        assert client.agents.list_agents().data.__len__() == list_length + 1
+        assert client.agents.list_agents().data[0].id == agent2.id
+
+        client.agents.delete_agent(agent2.id)
+        assert client.agents.list_agents().data.__len__() == list_length
+        print("Deleted agents")
+
+        # close client
+        client.close()
+    """
+
+    # **********************************************************************************
+    #
+    #                      HAPPY PATH SERVICE TESTS - Thread APIs
+    #
+    # **********************************************************************************
+
+    # test creating thread
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_thread(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        # assert isinstance(thread, agentThread) TODO finish this !
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete agent and close client + await client.agents.delete_agent(agent.id) + print("Deleted agent") + await client.close() + + # test getting thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_get_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = await client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # get thread + thread2 = await client.agents.get_thread(thread.id) + assert thread2.id + assert thread.id == thread2.id + print("Got thread, thread ID", thread2.id) + + # delete agent and close client + await client.agents.delete_agent(agent.id) + print("Deleted agent") + await client.close() + + """ + TODO what can I update a thread with? + # test updating thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_update_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # update thread + client.agents.update_thread(thread.id, ) # TODO what can we update it with? + assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + """ + # TODO this test is failing? client.agents.delete_thread(thread.id) isn't working + # status_code = 404, response = + # error_map = {304: , 401: , 409: } + + # test deleting thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_delete_thread(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = client.agents.create_thread() + # assert isinstance(thread, agentThread) TODO finish this ! 
need to import agentThread from _models + assert thread.id + print("Created thread, thread ID", thread.id) + + # delete thread + deletion_status = client.agents.delete_thread(thread.id) + # assert not thread + + # delete agent and close client + client.agents.delete_agent(agent.id) + print("Deleted agent") + client.close() + """ + + # # ********************************************************************************** + # # + # # HAPPY PATH SERVICE TESTS - Message APIs + # # + # # ********************************************************************************** + + # test creating message in a thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_message(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = await client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create message + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + + # delete agent and close client + await client.agents.delete_agent(agent.id) + print("Deleted agent") + await client.close() + + # test creating multiple messages in a thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_multiple_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = await client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # create messages + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + assert message.id + print("Created message, message ID", message.id) + message2 = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) + assert message2.id + print("Created message, message ID", message2.id) + message3 = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) + assert message3.id + print("Created message, message ID", message3.id) + + # delete agent and close client + await client.agents.delete_agent(agent.id) + print("Deleted agent") + await client.close() + + # test listing messages in a thread + @agentClientPreparer() + @recorded_by_proxy_async + async def test_list_messages(self, **kwargs): + # create client + client = self.create_client(**kwargs) + assert isinstance(client, AIProjectClient) + + # create agent + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + assert agent.id + print("Created agent, agent ID", agent.id) + + # create thread + thread = await client.agents.create_thread() + assert thread.id + print("Created thread, thread ID", thread.id) + + # check that initial message list is empty + messages0 = await client.agents.list_messages(thread_id=thread.id) + print(messages0.data) + assert messages0.data.__len__() 
== 0
+
+        # create messages and check message list for each one
+        message1 = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message1.id
+        print("Created message, message ID", message1.id)
+        messages1 = await client.agents.list_messages(thread_id=thread.id)
+        assert messages1.data.__len__() == 1
+        assert messages1.data[0].id == message1.id
+
+        message2 = await client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me another joke"
+        )
+        assert message2.id
+        print("Created message, message ID", message2.id)
+        messages2 = await client.agents.list_messages(thread_id=thread.id)
+        assert messages2.data.__len__() == 2
+        assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id
+
+        message3 = await client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, tell me a third joke"
+        )
+        assert message3.id
+        print("Created message, message ID", message3.id)
+        messages3 = await client.agents.list_messages(thread_id=thread.id)
+        assert messages3.data.__len__() == 3
+        assert (
+            messages3.data[0].id == message3.id
+            or messages3.data[1].id == message3.id
+            or messages3.data[2].id == message3.id
+        )
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # test getting message in a thread
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_get_message(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # get message
+        message2 = await client.agents.get_message(thread_id=thread.id, message_id=message.id)
+        assert message2.id
+        assert message.id == message2.id
+        print("Got message, message ID", message.id)
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    """
+    TODO format the updated body
+    # test updating message in a thread
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_update_message(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # update message
+        body_json = json.dumps  # TODO: build the JSON body once the message update schema is known
+        client.agents.update_message(thread_id=thread.id, message_id=message.id, body=)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    # # **********************************************************************************
+    # #
+    # #               HAPPY PATH SERVICE TESTS - Run APIs
+    # #
+    # # **********************************************************************************
+
+    # test creating run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # test getting run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_get_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # get run
+        run2 = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+        assert run2.id
+        assert run.id == run2.id
+        print("Got run, run ID", run2.id)
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # TODO: investigate flakiness - sometimes this test passes and sometimes it fails
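+
+    # Illustrative helper (an assumption for discussion, not used by the recorded
+    # tests): the status-polling loop below is repeated inline by several tests.
+    # A shared helper with a timeout would fail fast instead of looping forever on
+    # a stuck run. It only uses client.agents.get_run, which these tests already call.
+    async def _wait_for_terminal_status(self, client, thread_id, run_id, timeout_seconds=120):
+        deadline = time.time() + timeout_seconds
+        run = await client.agents.get_run(thread_id=thread_id, run_id=run_id)
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            assert time.time() < deadline, "run did not reach a terminal status in time"
+            time.sleep(1)  # matches the one-second polling interval used by the tests below
+            run = await client.agents.get_run(thread_id=thread_id, run_id=run_id)
+        return run
+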
+    # test successful run status  TODO: also test cancelled/unsuccessful runs
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_run_status(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status
+        assert run.status in [
+            "queued",
+            "in_progress",
+            "requires_action",
+            "cancelling",
+            "cancelled",
+            "failed",
+            "completed",
+            "expired",
+        ]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Run status:", run.status)
+
+        assert run.status in ["cancelled", "failed", "completed", "expired"]
+        print("Run completed with status:", run.status)
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    """
+    # TODO another, but check that the number of runs decreases after cancelling runs
+    # TODO can each thread only support one run?
+    # test listing runs
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_list_runs(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # check list for current runs
+        runs0 = client.agents.list_runs(thread_id=thread.id)
+        assert runs0.data.__len__() == 0
+
+        # create run and check list
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+        runs1 = client.agents.list_runs(thread_id=thread.id)
+        assert runs1.data.__len__() == 1
+        assert runs1.data[0].id == run.id
+
+        # create second run
+        run2 = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run2.id
+        print("Created run, run ID", run2.id)
+        runs2 = client.agents.list_runs(thread_id=thread.id)
+        assert runs2.data.__len__() == 2
+        assert runs2.data[0].id == run2.id or runs2.data[1].id == run2.id
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    """
+    # TODO figure out what to update the run with
+    # test updating run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_update_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # update run
+        body = json.dumps({'todo': 'placeholder'})
+        client.agents.update_run(thread_id=thread.id, run_id=run.id, body=body)
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    # test submitting tool outputs to run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_submit_tool_outputs_to_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # Initialize agent tools
+        functions = FunctionTool(functions=user_functions_recording)
+        code_interpreter = CodeInterpreterTool()
+
+        toolset = ToolSet()
+        toolset.add(functions)
+        toolset.add(code_interpreter)
+
+        # create agent
+        agent = await client.agents.create_agent(
+            model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset
+        )
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = await client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, what time is it?"
+        )
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check that tools are uploaded
+        assert run.tools
+        assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"]
+        print("Tool successfully submitted:", functions.definitions[0]["function"]["name"])
+
+        # check status
+        assert run.status in [
+            "queued",
+            "in_progress",
+            "requires_action",
+            "cancelling",
+            "cancelled",
+            "failed",
+            "completed",
+            "expired",
+        ]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            time.sleep(1)
+            run = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+
+            # check if tools are needed
+            if run.status == "requires_action" and run.required_action.submit_tool_outputs:
+                print("Requires action: submit tool outputs")
+                tool_calls = run.required_action.submit_tool_outputs.tool_calls  # plain attribute access, not awaitable
+                if not tool_calls:
+                    print(
+                        "No tool calls provided - cancelling run"
+                    )  # TODO: how can we make sure the run actually wants tools? should there be an error message?
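+                    # One possible answer to the TODO above (an assumption, not
+                    # current behavior): fail explicitly here, e.g. with pytest.fail(),
+                    # so a cancelled run is reported as a failure instead of the test
+                    # silently passing.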
+                    await client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+                    break
+
+                # submit tool outputs to run
+                tool_outputs = toolset.execute_tool_calls(tool_calls)  # TODO: there may still be an issue here
+                print("Tool outputs:", tool_outputs)
+                if tool_outputs:
+                    await client.agents.submit_tool_outputs_to_run(
+                        thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
+                    )
+
+            print("Current run status:", run.status)
+
+        print("Run completed with status:", run.status)
+
+        # check that messages used the tool
+        messages = await client.agents.list_messages(thread_id=thread.id, run_id=run.id)
+        tool_message = messages["data"][0]["content"][0]["text"]["value"]
+        hour24 = time.strftime("%H")  # %H is the 24-hour clock
+        hour12 = time.strftime("%I")  # %I is the 12-hour clock
+        minute = time.strftime("%M")
+        assert hour24 + ":" + minute in tool_message or hour12 + ":" + minute in tool_message
+        print("Used tool_outputs")
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    """
+    # DISABLED: rewrite to ensure run is not complete when cancel_run is called
+    # test cancelling run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_cancel_run(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?")
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        # check status and cancel
+        assert run.status in ["queued", "in_progress", "requires_action"]
+        client.agents.cancel_run(thread_id=thread.id, run_id=run.id)
+
+        while run.status in ["queued", "cancelling"]:
+            time.sleep(1)
+            run = client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            print("Current run status:", run.status)
+        assert run.status == "cancelled"
+        print("Run cancelled")
+
+        # delete agent and close client
+        client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        client.close()
+    """
+
+    # test create thread and run
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_thread_and_run(self, **kwargs):
+        time.sleep(26)
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread and run
+        run = await client.agents.create_thread_and_run(assistant_id=agent.id)
+        assert run.id
+        assert run.thread_id
+        print("Created run, run ID", run.id)
+
+        # get thread
+        thread = await client.agents.get_thread(run.thread_id)
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # check status
+        assert run.status in [
+            "queued",
+            "in_progress",
+            "requires_action",
+            "cancelling",
+            "cancelled",
+            "failed",
+            "completed",
+            "expired",
+        ]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            # assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+            print("Run status:", run.status)
+
+        assert run.status == "completed"
+        print("Run completed")
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # test listing run steps
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_list_run_step(self, **kwargs):
+        time.sleep(50)
+        # create client (create_client is a plain method, not a coroutine)
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = await client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, what time is it?"
+        )
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
+        # assertion below commented out: do we know exactly when the run starts?
+        # assert steps['data'].__len__() == 0
+
+        # check status
+        assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+            print("Run status:", run.status)
+            steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
+            assert steps["data"].__len__() > 0  # TODO: what else should we check?
+
+        assert run.status == "completed"
+        print("Run completed")
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    # test getting run step
+    # TODO: where do step IDs come from?
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_get_run_step(self, **kwargs):
+        # create client
+        client = self.create_client(**kwargs)
+        assert isinstance(client, AIProjectClient)
+
+        # create agent
+        agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        assert agent.id
+        print("Created agent, agent ID", agent.id)
+
+        # create thread
+        thread = await client.agents.create_thread()
+        assert thread.id
+        print("Created thread, thread ID", thread.id)
+
+        # create message
+        message = await client.agents.create_message(
+            thread_id=thread.id, role="user", content="Hello, can you tell me a joke?"
+        )
+        assert message.id
+        print("Created message, message ID", message.id)
+
+        # create run
+        run = await client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id
+        print("Created run, run ID", run.id)
+
+        if run.status == "failed":
+            assert run.last_error
+            print(run.last_error)
+            print("FAILED HERE")
+
+        # check status
+        assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+        while run.status in ["queued", "in_progress", "requires_action"]:
+            # wait for a second
+            time.sleep(1)
+            run = await client.agents.get_run(thread_id=thread.id, run_id=run.id)
+            if run.status == "failed":
+                assert run.last_error
+                print(run.last_error)
+                print("FAILED HERE")
+            assert run.status in ["queued", "in_progress", "requires_action", "completed"]
+            print("Run status:", run.status)
+
+        # list steps, check that get_run_step works with first step_id
+        steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id)
+        assert steps["data"].__len__() > 0
+        step = steps["data"][0]
+        get_step = await client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id)
+        assert step == get_step
+
+        # delete agent and close client
+        await client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await client.close()
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_vector_store_azure(self, **kwargs):
+        """Test the agent with vector store creation."""
+        await self._do_test_create_vector_store(**kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_vector_store_file_id(self, **kwargs):
+        """Test the agent with vector store creation."""
+        await self._do_test_create_vector_store(file_path=self._get_data_file(), **kwargs)
+
+    async def _do_test_create_vector_store(self, **kwargs):
+        """Test the agent with vector store creation."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = await self._get_file_id_maybe(ai_client, **kwargs)
+        file_ids = [file_id] if file_id else None
+        if file_ids:
+            ds = None
+        else:
+            ds = [
+                VectorStorageDataSource(
+                    storage_uri=kwargs["azure_ai_projects_data_path"],
+                    asset_type=VectorStorageDataSourceAssetType.URI_ASSET,
+                )
+            ]
+        vector_store = await ai_client.agents.create_vector_store_and_poll(
+            file_ids=file_ids, data_sources=ds, name="my_vectorstore"
+        )
+        assert vector_store.id
+        await self._test_file_search(ai_client, vector_store, file_id)
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_vector_store_add_file_file_id(self, **kwargs):
+        """Test adding a single file to a vector store with a file ID."""
+        await self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs)
+
+    @agentClientPreparer()
+    @pytest.mark.skip("The CreateVectorStoreFile API is not supported yet.")
+    @recorded_by_proxy_async
+    async def test_create_vector_store_add_file_azure(self, **kwargs):
+        """Test adding a single file to a vector store with an azure asset ID."""
+        await self._do_test_create_vector_store_add_file(**kwargs)
+
+    async def _do_test_create_vector_store_add_file(self, **kwargs):
+        """Test adding single file to vector store."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = await self._get_file_id_maybe(ai_client, **kwargs)
+        if file_id:
+            ds = None
+        else:
+            ds = [
+                VectorStorageDataSource(
+                    storage_uri=kwargs["azure_ai_projects_data_path"],
+
asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file = await ai_client.agents.create_vector_store_file( + vector_store_id=vector_store.id, data_sources=ds, file_id=file_id + ) + assert vector_store_file.id + await self._test_file_search(ai_client, vector_store, file_id) + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_vector_store_batch_file_ids(self, **kwargs): + """Test adding multiple files to vector store with file IDs.""" + await self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs) + + @agentClientPreparer() + @pytest.mark.skip("The CreateFileBatch API is not supported yet.") + @recorded_by_proxy_async + async def test_create_vector_store_batch_azure(self, **kwargs): + """Test adding multiple files to vector store with azure asset IDs.""" + await self._do_test_create_vector_store_batch(**kwargs) + + async def _do_test_create_vector_store_batch(self, **kwargs): + """Test the agent with vector store creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + file_id = await self._get_file_id_maybe(ai_client, **kwargs) + if file_id: + file_ids = [file_id] + ds = None + else: + file_ids = None + ds = [ + VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + ) + ] + vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + assert vector_store.id + vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) + assert vector_store_file_batch.id + await self._test_file_search(ai_client, vector_store, file_id) + + async def _test_file_search(self, ai_client: AIProjectClient, vector_store: VectorStore, file_id: str) -> None: + """Test the file search""" + file_search = FileSearchTool(vector_store_ids=[vector_store.id]) + agent = await ai_client.agents.create_agent( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert agent.id + thread = await ai_client.agents.create_thread() + assert thread.id + # create message + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
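+        # NOTE: create_and_process_run (used next) is expected to poll the run to a
+        # terminal status internally, so no explicit status loop is needed here,
+        # unlike the polling loops in the run tests above.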
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        await ai_client.agents.delete_vector_store(vector_store.id)
+        assert run.status == "completed"
+        messages = await ai_client.agents.list_messages(thread_id=thread.id)
+        assert len(messages)
+        await self._remove_file_maybe(file_id, ai_client)
+        # delete agent and close client
+        await ai_client.agents.delete_agent(agent.id)
+        print("Deleted agent")
+        await ai_client.close()
+
+    @agentClientPreparer()
+    @pytest.mark.skip("The CreateFileBatch API is not supported yet.")
+    @recorded_by_proxy_async
+    async def test_message_attachment_azure(self, **kwargs):
+        """Test message attachment with azure ID."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        await self._do_test_message_attachment(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_message_attachment_file_ids(self, **kwargs):
+        """Test message attachment with file ID."""
+        await self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs)
+
+    async def _do_test_message_attachment(self, **kwargs):
+        """Test agent with the message attachment."""
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = await self._get_file_id_maybe(ai_client, **kwargs)
+
+        # Create agent (the file search tool is carried by the message attachment below, not by the agent)
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4-1106-preview",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+        )
+        assert agent.id, "Agent was not created"
+
+        thread = await ai_client.agents.create_thread()
+        assert thread.id, "The thread was not created."
+
+        # Create a message with the file search attachment.
+        # Note that the vector store is created temporarily when using attachments, with a default expiration policy of seven days.
+        attachment = MessageAttachment(
+            file_id=file_id,
+            data_sources=kwargs.get("data_sources"),
+            tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]],
+        )
+        message = await ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment]
+        )
+        assert message.id, "The message was not created."
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id, "The run was not created."
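+        # clean up the uploaded file (if any) before the remaining assertions so a
+        # failing assertion does not leak files in the shared project resource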
+ await self._remove_file_maybe(file_id, ai_client) + await ai_client.agents.delete_agent(agent.id) + + messages = await ai_client.agents.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_vector_store_threads_file_search_azure(self, **kwargs): + """Test file search when azure asset ids are supplied during thread creation.""" + # create client + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + ds = [ + VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + ) + ] + fs = FileSearchToolResource( + vector_stores=[ + VectorStoreAzureConfigurations( + store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + ) + ] + ) + file_search = FileSearchTool() + agent = await ai_client.agents.create_agent( + model="gpt-4o", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=file_search.definitions, + tool_resources=file_search.resources, + ) + assert agent.id + + thread = await ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) + assert thread.id + # create message + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + assert run.status == "completed", f"Error in run: {run.last_error}" + messages = await ai_client.agents.list_messages(thread.id) + assert len(messages) + await ai_client.agents.delete_agent(agent.id) + await ai_client.close() + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ) + await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_assistant_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_assistant_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.agents.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS + ) + assert file.id, "The file was not uploaded." 
+ file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = await ai_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="Hello, you are helpful assistant and can search information from uploaded files", + tools=code_interpreter.definitions, + tool_resources=tr, + ) + assert agent.id, "Agent was not created" + + thread = await ai_client.agents.create_thread() + assert thread.id, "The thread was not created." + + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." + + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + assert run.id, "The run was not created." + await self._remove_file_maybe(file_id, ai_client) + assert run.status == "completed", f"Error in run: {run.last_error}" + await ai_client.agents.delete_agent(agent.id) + messages = await ai_client.agents.list_messages(thread_id=thread.id) + assert len(messages), "No messages were created" + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_azure(self, **kwargs): + """Test Create assistant with code interpreter with azure asset ids.""" + ds = VectorStorageDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ) + await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) + + @agentClientPreparer() + @recorded_by_proxy_async + async def test_create_thread_with_interpreter_file_ids(self, **kwargs): + """Test Create assistant with code interpreter with file IDs.""" + await self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + + async def _do_test_create_thread_with_interpreter(self, **kwargs): + """Test create assistant with code interpreter and project asset id""" + ai_client = self.create_client(**kwargs) + assert isinstance(ai_client, AIProjectClient) + + code_interpreter = CodeInterpreterTool() + + file_id = None + if "file_path" in kwargs: + file = await ai_client.agents.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS + ) + assert file.id, "The file was not uploaded." + file_id = file.id + + cdr = CodeInterpreterToolResource( + file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + ) + tr = ToolResources(code_interpreter=cdr) + # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment + agent = await ai_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=code_interpreter.definitions, + ) + assert agent.id, "Agent was not created" + + thread = await ai_client.agents.create_thread(tool_resources=tr) + assert thread.id, "The thread was not created." + + message = await ai_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?" + ) + assert message.id, "The message was not created." 
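+        # here the code interpreter file travels with the thread (tool_resources was
+        # passed to create_thread above), not with the agent; the run should still be
+        # able to read the attachment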
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.id, "The run was not created."
+        await self._remove_file_maybe(file_id, ai_client)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        await ai_client.agents.delete_agent(agent.id)
+        messages = await ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_assistant_with_inline_vs_azure(self, **kwargs):
+        """Test creation of assistant with vector store inline."""
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        ds = [
+            VectorStorageDataSource(
+                storage_uri=kwargs["azure_ai_projects_data_path"],
+                asset_type=VectorStorageDataSourceAssetType.URI_ASSET,
+            )
+        ]
+        fs = FileSearchToolResource(
+            vector_stores=[
+                VectorStoreAzureConfigurations(
+                    store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds)
+                )
+            ]
+        )
+        file_search = FileSearchTool()
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+            tool_resources=ToolResources(file_search=fs),
+        )
+        assert agent.id
+
+        thread = await ai_client.agents.create_thread()
+        assert thread.id
+        # create message
+        message = await ai_client.agents.create_message(
+            thread_id=thread.id, role="user", content="What does the attachment say?"
+        )
+        assert message.id, "The message was not created."
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = await ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        await ai_client.agents.delete_agent(agent.id)
+        await ai_client.close()
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_attachment_in_thread_azure(self, **kwargs):
+        """Create thread with message attachment inline with azure asset IDs."""
+        ds = VectorStorageDataSource(
+            storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET
+        )
+        await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs)
+
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_create_attachment_in_thread_file_ids(self, **kwargs):
+        """Create thread with message attachment inline with file IDs."""
+        await self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs)
+
+    async def _do_test_create_attachment_in_thread_azure(self, **kwargs):
+        # create client
+        ai_client = self.create_client(**kwargs)
+        assert isinstance(ai_client, AIProjectClient)
+
+        file_id = await self._get_file_id_maybe(ai_client, **kwargs)
+
+        file_search = FileSearchTool()
+        agent = await ai_client.agents.create_agent(
+            model="gpt-4o",
+            name="my-assistant",
+            instructions="Hello, you are helpful assistant and can search information from uploaded files",
+            tools=file_search.definitions,
+        )
+        assert agent.id
+
+        # create message
+        attachment = MessageAttachment(
+            file_id=file_id,
+            data_sources=kwargs.get("data_sources"),
+            tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]],
+        )
+        message = ThreadMessageOptions(role="user", content="What does the attachment say?", attachments=[attachment])
+        thread = await ai_client.agents.create_thread(messages=[message])
+        assert thread.id
+
+        run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+        assert run.status == "completed", f"Error in run: {run.last_error}"
+        messages = await ai_client.agents.list_messages(thread.id)
+        assert len(messages)
+        await ai_client.agents.delete_agent(agent.id)
+        await ai_client.close()
+
+    async def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> str:
+        """Return the file ID if kwargs has a file path."""
+        if "file_path" in kwargs:
+            file = await ai_client.agents.upload_file_and_poll(
+                file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS
+            )
+            assert file.id, "The file was not uploaded."
+            return file.id
+        return None
+
+    async def _remove_file_maybe(self, file_id: str, ai_client: AIProjectClient) -> None:
+        """Remove the file if we have a file ID."""
+        if file_id:
+            await ai_client.agents.delete_file(file_id)
+
+    # # **********************************************************************************
+    # #
+    # #          HAPPY PATH SERVICE TESTS - Streaming APIs
+    # #
+    # # **********************************************************************************
+
+    # # **********************************************************************************
+    # #
+    # #                      NEGATIVE TESTS - TODO: decide what goes here
+    # #
+    # # **********************************************************************************
+
+    """
+    # DISABLED, PASSES LIVE ONLY: recordings don't capture DNS lookup errors
+    # test agent creation and deletion
+    @agentClientPreparer()
+    @recorded_by_proxy_async
+    async def test_negative_create_delete_agent(self, **kwargs):
+        # create client using a bad endpoint
+        bad_connection_string = "https://foo.bar.some-domain.ms;00000000-0000-0000-0000-000000000000;rg-resour-cegr-oupfoo1;abcd-abcdabcdabcda-abcdefghijklm"
+
+        credential = self.get_credential(AIProjectClient, is_async=False)
+        client = AIProjectClient.from_connection_string(
+            credential=credential,
+            conn_str=bad_connection_string,
+        )
+
+        # attempt to create agent with bad client
+        exception_caught = False
+        try:
+            agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent")
+        # check for error (will not have a status code since it failed on the request -- no response was received)
+        except (ServiceRequestError, HttpResponseError) as e:
+            exception_caught = True
+            if isinstance(e, ServiceRequestError):
+                assert e.message
+                assert "failed to resolve 'foo.bar.some-domain.ms'" in e.message.lower()
+            else:
+                assert "No such host is known" in str(e) and "foo.bar.some-domain.ms" in str(e)
+
+        # close client and confirm an exception was caught
+        client.close()
+        assert exception_caught
+    """
diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py
new file mode 100644
index 000000000000..5b69d17c6781
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py
@@ -0,0 +1,41 @@
+# ------------------------------------
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+# ------------------------------------
+
+import unittest
+from azure.ai.projects._model_base import _deserialize
+from azure.ai.projects.models import _models
+
+
+class Test(unittest.TestCase):
+
+    def testName(self):
+        val = {
+            "id": "vs_OQpX6y9YM368EBZ5GmF45kRO",
+            "object": "vector_store",
+            "name": "TV Support FAQ",
+            "status": "completed",
+            "usage_bytes": 0,
+            "created_at": 1729730726,
+            "file_counts": {"in_progress": 0, "completed": 0, "failed": 0, "cancelled": 0, "total": 0},
+            "metadata": {"source": "Assistant API Tests"},
+            "expires_after": None,
+            "expires_at": None,
+            "last_active_at": 1729730726,
+            "configuration": {
+                "data_sources": [
+                    {
+                        "type": "uri_asset",
+                        "uri": "azureml://subscriptions/10e1de13-9717-4242-acf5-3e241940d326/resourcegroups/rg-sawidderai/workspaces/sawidder-0278/datastores/workspaceblobstore/paths/UI/2024-10-01_001042_UTC/unit-test.txt",
+                    }
+                ]
+            },
+            "configuration1": {},
+        }
+        # json_val = json.dumps(val)
+        vct = _deserialize(_models.VectorStore, val)
+
+if __name__ == "__main__":
+    # import sys;sys.argv = ['', 'Test.testName']
+    unittest.main()
diff --git a/sdk/ai/azure-ai-projects/tests/check_sample_name.sh b/sdk/ai/azure-ai-projects/tests/check_sample_name.sh
new file mode 100644
index 000000000000..e6a6f70dffa2
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/check_sample_name.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# This is a simple helper script to check the name of a sample file.
+# The file name should appear in two places inside the file:
+# FILE: $fname
+# ...
+# python $fname
+# If the file contains its name fewer times, we print its name.
+
+SAMPLES_SYNC="`dirname ${0}`/../samples/agents"
+SAMPLES_ASYNC="`dirname ${0}`/../samples/agents/async_samples"
+
+for sample_dir in "$SAMPLES_SYNC" "$SAMPLES_ASYNC"; do
+    for fname in `ls "$sample_dir" | grep \^sample_ | grep \[.\]py\$`; do
+        cnt=`grep -c "${fname}" "${sample_dir}/${fname}"`
+        if [ $cnt -lt 2 ]; then
+            echo "${sample_dir}/${fname} name encountered ${cnt} times."
+        fi
+    done
+done
+exit 0
diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py
index ef28272b0e8a..131aac05c971 100644
--- a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py
+++ b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py
@@ -15,6 +15,7 @@
 from evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests
 
+
 class TestEvaluation(EvaluationsTestBase):
 
     @servicePreparerEvaluationsTests()
diff --git a/sdk/ai/azure-ai-projects/tests/test_data/product_info_1.md b/sdk/ai/azure-ai-projects/tests/test_data/product_info_1.md
new file mode 100644
index 000000000000..041155831d53
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/tests/test_data/product_info_1.md
@@ -0,0 +1,51 @@
+# Information about product item_number: 1
+
+## Brand
+Contoso Galaxy Innovations
+
+## Category
+Smart Eyewear
+
+## Features
+- Augmented Reality interface
+- Voice-controlled AI assistant
+- HD video recording with 3D audio
+- UV protection and blue light filtering
+- Wireless charging with extended battery life
+
+## User Guide
+
+### 1. Introduction
+Introduction to your new SmartView Glasses
+
+### 2. Product Overview
+Overview of features and controls
+
+### 3. Sizing and Fit
+Finding your perfect fit and style adjustments
+
+### 4. Proper Care and Maintenance
+Cleaning and caring for your SmartView Glasses
+
+### 5. Break-in Period
+Adjusting to the augmented reality experience
+
+### 6.
Safety Tips +Safety guidelines for public and private spaces + +### 7. Troubleshooting +Quick fixes for common issues + +## Warranty Information +Two-year limited warranty on all electronic components + +## Contact Information +Customer Support at support@contoso-galaxy-innovations.com + +## Return Policy +30-day return policy with no questions asked + +## FAQ +- How to sync your SmartView Glasses with your devices +- Troubleshooting connection issues +- Customizing your augmented reality environment diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index 26d7771f403f..b32318114a16 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 89ff4b214a55d27f3dc78d3c2ddf9a9523622d5d +commit: 17ab26e678f06460cd154e2c0cb187aae158f4bf repo: Azure/azure-rest-api-specs additionalDirectories: From 3e483843f3136a0380c6ca2adfaf7e773c03868d Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 10:14:19 -0800 Subject: [PATCH 095/138] Fix PR comment by Krista on async credential in ConnectionProperties class (#38424) --- .vscode/cspell.json | 3 ++- eng/.docsettings.yml | 1 + sdk/ai/azure-ai-projects/README.md | 2 +- sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py | 6 +++++- sdk/ai/azure-ai-projects/setup.py | 2 +- 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 25c093b71a35..1d045151f40f 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1325,7 +1325,8 @@ { "filename": "sdk/ai/azure-ai-projects/**", "words": [ - "aiservices" + "aiservices", + "OTEL" ] }, { diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index 9110bbeaac14..17a9a3d2eda0 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -15,6 +15,7 @@ omitted_paths: - sdk/ml/azure-ai-ml/tests/* - sdk/vision/azure-ai-vision-imageanalysis/tests/* - sdk/ai/azure-ai-inference/tests/* + - sdk/ai/azure-ai-projects/tests/* - sdk/storage/azure-storage-extensions/* language: python diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 360b319741db..3f2f99ff9450 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -223,7 +223,7 @@ You can enhance your agents by defining callback functions as function tools. Th For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). 
-Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_function.py) in `toolset`: +Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_functions.py) in `toolset`: ```python diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index c407605ec666..08d483db4895 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -17,6 +17,7 @@ import re from azure.core.credentials import TokenCredential, AccessToken +from azure.core.credentials_async import AsyncTokenCredential from ._enums import AgentStreamEvent, ConnectionType from ._models import ( @@ -127,7 +128,10 @@ class ConnectionProperties: """ def __init__( - self, *, connection: GetConnectionResponse, token_credential: Optional[TokenCredential] = None + self, + *, + connection: GetConnectionResponse, + token_credential: Union[TokenCredential, AsyncTokenCredential, None] = None, ) -> None: self.id = connection.id self.name = connection.name diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index 37a6290f3338..a3190a395e7d 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -13,7 +13,7 @@ PACKAGE_NAME = "azure-ai-projects" -PACKAGE_PPRINT_NAME = "Azure Ai Projects" +PACKAGE_PPRINT_NAME = "Azure AI Projects" # a-b-c => a/b/c package_folder_path = PACKAGE_NAME.replace("-", "/") From ad6cddfe0fb5143383e6a47cc4e82ca6f50c8415 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:19:14 -0800 Subject: [PATCH 096/138] Black --- sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py | 1 + sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py index 5b69d17c6781..ffad05175c55 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py @@ -36,6 +36,7 @@ def testName(self): # json_val = json.dumps(val) vct = _deserialize(_models.VectorStore, val) + if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main() diff --git a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py index 131aac05c971..ef28272b0e8a 100644 --- a/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py +++ b/sdk/ai/azure-ai-projects/tests/evaluation/test_evaluation.py @@ -15,7 +15,6 @@ from evaluation_test_base import EvaluationsTestBase, servicePreparerEvaluationsTests - class TestEvaluation(EvaluationsTestBase): @servicePreparerEvaluationsTests() From 1722934de6511695747abfdff1afc9ca9c74b2f1 Mon Sep 17 00:00:00 2001 From: Ankit Singhal <30610298+singankit@users.noreply.github.com> Date: Fri, 8 Nov 2024 12:34:32 -0800 Subject: [PATCH 097/138] User/singankit/evaluation spellcheck (#38427) * Spell check errors evaluation * Removing test environment from evaluation schedule test --- .vscode/cspell.json | 2 ++ .../{ => data}/evaluate_test_data.jsonl | 0 .../async_samples/sample_evaluations_async.py | 2 +- .../evaluations/{ => 
data}/evaluate_test_data.jsonl | 0 .../samples/evaluations/sample_evaluations.py | 2 +- .../evaluations/sample_evaluations_schedules.py | 13 +++++-------- 6 files changed, 9 insertions(+), 10 deletions(-) rename sdk/ai/azure-ai-projects/samples/evaluations/async_samples/{ => data}/evaluate_test_data.jsonl (100%) rename sdk/ai/azure-ai-projects/samples/evaluations/{ => data}/evaluate_test_data.jsonl (100%) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 1d045151f40f..70752af4cc66 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -34,6 +34,8 @@ "sdk/ai/**/index/**", "sdk/ai/azure-ai-generative/tests/**", "sdk/ai/azure-ai-projects/samples/agents/nifty_500_quarterly_results.csv", + "/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/**", + "/sdk/ai/azure-ai-projects/samples/evaluations/data/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_index/_langchain/vendor/**", "sdk/ai/azure-ai-resources/azure/ai/resources/_restclient/**", "sdk/cognitiveservices/azure-cognitiveservices-search-autosuggest/**", diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/evaluate_test_data.jsonl similarity index 100% rename from sdk/ai/azure-ai-projects/samples/evaluations/async_samples/evaluate_test_data.jsonl rename to sdk/ai/azure-ai-projects/samples/evaluations/async_samples/data/evaluate_test_data.jsonl diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index ee490f846797..77b590d93267 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -37,7 +37,7 @@ async def main(): ) # Upload data for evaluation - data_id, _ = project_client.upload_file("./evaluate_test_data.jsonl") + data_id, _ = project_client.upload_file("./data/evaluate_test_data.jsonl") default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl b/sdk/ai/azure-ai-projects/samples/evaluations/data/evaluate_test_data.jsonl similarity index 100% rename from sdk/ai/azure-ai-projects/samples/evaluations/evaluate_test_data.jsonl rename to sdk/ai/azure-ai-projects/samples/evaluations/data/evaluate_test_data.jsonl diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index 435e43875821..c322d3d3ed3f 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -37,7 +37,7 @@ ) # Upload data for evaluation -data_id, _ = project_client.upload_file("./evaluate_test_data.jsonl") +data_id, _ = project_client.upload_file("./data/evaluate_test_data.jsonl") default_connection = project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index 18cbb8fb59ac..c78f94c37e17 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ 
b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -15,10 +15,10 @@ def main(): # Project Configuration Canary - Subscription = "72c03bf3-4e69-41af-9532-dfcdc3eefef4" - ResourceGroup = "apeddau-rg-westus2" - Workspace = "apeddau-canay-ws-eastus2euap" - Endpoint = "eastus2euap.api.azureml.ms" + Subscription = "" + ResourceGroup = "" + Workspace = ">" + Endpoint = "" # Create an Azure AI client ai_client = AIProjectClient.from_connection_string( @@ -30,7 +30,7 @@ def main(): # Sample for creating an evaluation schedule with recurrence trigger of daily frequency app_insights_config = ApplicationInsightsConfiguration( - resource_id="/subscriptions/72c03bf3-4e69-41af-9532-dfcdc3eefef4/resourceGroups/apeddau-rg-centraluseuap/providers/Microsoft.insights/components/apeddauwscentr0026977484", + resource_id="", query='traces | where message contains ""', service_name="sample_service_name", ) @@ -48,9 +48,6 @@ def main(): name = "CANARY-ONLINE-EVAL-TEST-WS-ENV-104" description = "Testing Online eval command job in CANARY environment" tags = {"tag1": "value1", "tag2": "value2"} - properties = { - "Environment": "azureml://registries/apeddau-online-evals-registry/environments/online-eval-env/versions/1" - } evaluation_schedule = EvaluationSchedule( data=app_insights_config, From f4eb82bd9b121406cd5f9df0e3df6b9aa63d411f Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 12:41:12 -0800 Subject: [PATCH 098/138] Fix doc string errors --- .../azure/ai/projects/aio/operations/_patch.py | 2 +- sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py | 4 ++-- .../azure-ai-projects/azure/ai/projects/operations/_patch.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 2771db4188e1..b1921bf2fc70 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -394,7 +394,7 @@ async def get_connection_string(self) -> str: :return: The Application Insights connection string if a the resource was enabled for the Project. :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 9a4480ed227d..9bd921251209 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -5746,14 +5746,14 @@ class VectorStoreDataSource(_model_base.Model): :ivar asset_identifier: Asset URI. Required. :vartype asset_identifier: str - :ivar asset_type: The asset type *. Required. Known values are: "uri_asset" and "id_asset". + :ivar asset_type: The asset type. Required. Known values are: "uri_asset" and "id_asset". :vartype asset_type: str or ~azure.ai.projects.models.VectorStoreDataSourceAssetType """ asset_identifier: str = rest_field(name="uri") """Asset URI. Required.""" asset_type: Union[str, "_models.VectorStoreDataSourceAssetType"] = rest_field(name="type") - """The asset type *. Required. Known values are: \"uri_asset\" and \"id_asset\".""" + """The asset type. Required. 
Known values are: \"uri_asset\" and \"id_asset\".""" @overload def __init__( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 62ad9f7bb06d..f065eb72074a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -486,7 +486,7 @@ def get_connection_string(self) -> str: :return: The Application Insights connection string if a the resource was enabled for the Project. :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError + :raises ~azure.core.exceptions.ResourceNotFoundError: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists From c6edff5d8b0582372adfa1699faededd03683b43 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:55:11 -0800 Subject: [PATCH 099/138] Fix typing in samples (#38354) * Fix samples * Re enable type checks and remove extra code * Remove non needed code * Remove non needed changes * Remove non needed changes * Fixes * Return non optional client * Remove extra line * Fix file names * Fixes * Fix async file * Fix mypy issues and suppress the ones related to typespec * Fix cSpell * Re generate code and fix unit tests * Delete sdk/ai/azure-ai-projects/samples/config.json * Fix typo * Fix unit test --- .../azure/ai/projects/_patch.py | 2 +- .../azure/ai/projects/aio/_patch.py | 44 +++++++++++-- .../ai/projects/aio/operations/_patch.py | 4 +- .../azure/ai/projects/models/_patch.py | 20 +++--- .../azure/ai/projects/operations/_patch.py | 2 +- .../agents/_ai_agents_instrumentor.py | 2 +- sdk/ai/azure-ai-projects/pyproject.toml | 9 ++- .../sample_agents_basics_async.py | 2 +- ...basics_async_with_azure_monitor_tracing.py | 3 +- ...gents_basics_async_with_console_tracing.py | 4 +- .../sample_agents_code_interpreter_async.py | 2 +- ...gents_code_interpreter_attachment_async.py | 2 +- ...eter_attachment_enterprise_search_async.py | 2 +- .../sample_agents_functions_async.py | 17 ++++-- .../sample_agents_run_with_toolset_async.py | 4 +- ...sample_agents_stream_eventhandler_async.py | 2 +- ..._stream_eventhandler_with_toolset_async.py | 3 +- .../sample_agents_stream_iteration_async.py | 2 +- ...tore_batch_enterprise_file_search_async.py | 4 +- ...ts_vector_store_batch_file_search_async.py | 2 +- ...ctor_store_enterprise_file_search_async.py | 4 +- ...e_agents_vector_store_file_search_async.py | 4 +- ...gents_with_file_search_attachment_async.py | 2 +- .../sample_agents_enterprise_file_search.py | 2 +- ...ts_functions_with_azure_monitor_tracing.py | 4 +- ...e_agents_functions_with_console_tracing.py | 2 +- ...eventhandler_with_azure_monitor_tracing.py | 17 +++--- ...tream_eventhandler_with_console_tracing.py | 11 ++-- ...ents_stream_eventhandler_with_functions.py | 6 +- ...agents_stream_eventhandler_with_toolset.py | 3 - .../sample_agents_vector_store_file_search.py | 2 +- ...mple_agents_with_file_search_attachment.py | 9 +-- .../async_samples/sample_connections_async.py | 19 +++--- .../samples/connections/sample_connections.py | 17 +++--- .../async_samples/sample_evaluations_async.py | 1 - .../samples/evaluations/sample_evaluations.py | 5 +- .../sample_evaluations_schedules.py | 1 - ...ai_inference_client_and_console_tracing.py | 2 +- .../tests/agents/test_agents_client.py | 56 ++++++++--------- .../tests/agents/test_agents_client_async.py | 61 
++++++++----------- .../tests/agents/test_vector_store.py | 2 +- .../tests/check_sample_name.sh | 2 +- 42 files changed, 203 insertions(+), 161 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 95dc6b8ea967..7acec5304675 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -230,7 +230,7 @@ def from_connection_string(cls, conn_str: str, credential: "TokenCredential", ** project_name = parts[3] return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str]: + def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: """Upload a file to the Azure AI Studio project. This method required *azure-ai-ml* to be installed. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 97373d32cf3a..d70124530096 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -2,14 +2,17 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ +from azure.core.credentials import TokenCredential """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import asyncio +import concurrent.futures import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict, Tuple, TYPE_CHECKING +from typing import List, Any, Union, Dict, Optional, Tuple, TYPE_CHECKING from azure.core import AsyncPipelineClient from azure.core.pipeline import policies from typing_extensions import Self @@ -22,6 +25,7 @@ if TYPE_CHECKING: from azure.core.credentials_async import AsyncTokenCredential + from azure.core.credentials import AccessToken class AIProjectClient(ClientGenerated): @@ -233,14 +237,14 @@ def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential project_name = parts[3] return cls(endpoint, subscription_id, resource_group_name, project_name, credential, **kwargs) - def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str]: + def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: """Upload a file to the Azure AI Studio project. This method required *azure-ai-ml* to be installed. :param file_path: The path to the file to upload. :type file_path: Union[str, Path, PathLike] :return: The tuple, containing asset id and asset URI of uploaded file. - :rtype: Tuple[str] + :rtype: Tuple[str, str] """ try: from azure.ai.ml import MLClient # type: ignore @@ -258,9 +262,10 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str]: is_anonymous=True, version="1", ) + # We have to wrap async method get_token of ml_client = MLClient( - self._config3.credential, + _SyncCredentialWrapper(self._config3.credential), self._config3.subscription_id, self._config3.resource_group_name, self._config3.project_name, @@ -283,6 +288,37 @@ def scope(self) -> Dict[str, str]: "AIProjectClient", ] # Add all objects you want publicly available to users at this package level +class _SyncCredentialWrapper(TokenCredential): + """ + The class, synchronizing AsyncTokenCredential. + + :param async_credential: The async credential to be synchronized. 
+ :type async_credential: ~azure.core.credentials_async.AsyncTokenCredential + """ + + def __init__(self, async_credential: "AsyncTokenCredential"): + self._async_credential = async_credential + + def get_token( + self, + *scopes:str, + claims:Optional[str]=None, + tenant_id:Optional[str]=None, + enable_cae:bool=False, ** + kwargs:Any) -> "AccessToken": + + pool = concurrent.futures.ThreadPoolExecutor() + return pool.submit( + asyncio.run, + self._async_credential.get_token( + *scopes, + claims=claims, + tenant_id=tenant_id, + enable_cae=enable_cae, + **kwargs + ) + ).result() + def patch_sdk(): """Do not remove from this file. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index b1921bf2fc70..0fae5f10bbed 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -66,7 +66,7 @@ def __init__(self, outer_instance): self._outer_instance = outer_instance @distributed_trace_async - async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletionsClient]": + async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. @@ -136,7 +136,7 @@ async def get_chat_completions_client(self, **kwargs) -> "Optional[ChatCompletio return client @distributed_trace_async - async def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": + async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. 
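The `_SyncCredentialWrapper` introduced above bridges an `AsyncTokenCredential` to the synchronous `TokenCredential` protocol that `MLClient` expects, by running the coroutine to completion on a worker thread. Here is a minimal, SDK-free sketch of the same sync-over-async pattern; `FakeAsyncCredential` and the local `AccessToken` type are illustrative stand-ins, not Azure types:

```python
import asyncio
import concurrent.futures
from typing import Any, NamedTuple


class AccessToken(NamedTuple):
    # Stand-in for azure.core.credentials.AccessToken
    token: str
    expires_on: int


class FakeAsyncCredential:
    # Illustrative async credential; not part of azure-identity.
    async def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken:
        await asyncio.sleep(0)  # pretend to perform async I/O
        return AccessToken(token="fake-token", expires_on=1999999999)


class SyncCredentialWrapper:
    # Expose an async credential through a synchronous get_token.
    def __init__(self, async_credential: FakeAsyncCredential) -> None:
        self._async_credential = async_credential

    def get_token(self, *scopes: str, **kwargs: Any) -> AccessToken:
        # Run the coroutine via asyncio.run on a worker thread, so the call
        # also works when the caller is already inside a running event loop.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(
                asyncio.run, self._async_credential.get_token(*scopes, **kwargs)
            ).result()


if __name__ == "__main__":
    wrapper = SyncCredentialWrapper(FakeAsyncCredential())
    print(wrapper.get_token("https://example.invalid/.default").token)
```

One difference from the patch: the sketch creates the executor in a `with` block so the worker thread is released after each call, whereas the patched code leaves the pool unclosed.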
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 08d483db4895..6d74afc54339 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -993,10 +993,11 @@ async def __anext__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return await self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, str]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" + error_string = "" for line in event_lines: if line.startswith("event:"): @@ -1053,11 +1054,12 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: event_data_obj = parsed_data + error_string = str(parsed_data) - return event_type, event_data_obj + return event_type, event_data_obj, error_string async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: - event_type, event_data_obj = self._parse_event_data(event_data_str) + event_type, event_data_obj, error_string = self._parse_event_data(event_data_str) if ( isinstance(event_data_obj, ThreadRun) @@ -1078,7 +1080,7 @@ async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventDat elif isinstance(event_data_obj, RunStepDeltaChunk): await self.event_handler.on_run_step_delta(event_data_obj) elif event_type == AgentStreamEvent.ERROR: - await self.event_handler.on_error(event_data_obj) + await self.event_handler.on_error(error_string) elif event_type == AgentStreamEvent.DONE: await self.event_handler.on_done() self.done = True # Mark the stream as done @@ -1142,10 +1144,11 @@ def __next__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, str]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" + error_string = "" for line in event_lines: if line.startswith("event:"): @@ -1200,11 +1203,12 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData]: event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: event_data_obj = parsed_data + error_string = str(parsed_data) - return event_type, event_data_obj + return event_type, event_data_obj, error_string def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: - event_type, event_data_obj = self._parse_event_data(event_data_str) + event_type, event_data_obj, error_string= self._parse_event_data(event_data_str) if ( isinstance(event_data_obj, ThreadRun) @@ -1225,7 +1229,7 @@ def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: elif isinstance(event_data_obj, RunStepDeltaChunk): self.event_handler.on_run_step_delta(event_data_obj) elif event_type == AgentStreamEvent.ERROR: - self.event_handler.on_error(event_data_obj) + self.event_handler.on_error(error_string) elif event_type == AgentStreamEvent.DONE: self.event_handler.on_done() self.done = True # Mark the stream as done diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py 
b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index f065eb72074a..6214d21e3bc3 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -125,7 +125,7 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": return client @distributed_trace - def get_embeddings_client(self, **kwargs) -> "Optional[EmbeddingsClient]": + def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": """Get an authenticated EmbeddingsClient (from the package azure-ai-inference) for the default Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed in this resource. The package `azure-ai-inference` must be installed prior to calling this method. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 1cb89d70ac56..1a853c8e5817 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -304,7 +304,7 @@ def _add_message_event( message_id: Optional[str] = None, thread_run_id: Optional[str] = None, message_status: Optional[str] = None, - incomplete_details: Optional[str] = None, + incomplete_details: Optional[MessageIncompleteDetails] = None, usage: Optional[_models.RunStepCompletionUsage] = None, ) -> None: # TODO document new fields diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index 326df8f77fd8..d9a1436e1c08 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -2,9 +2,14 @@ python_version = "3.8" exclude = [ "downloaded", - "samples" + # Types contains code, generated by typespec. + "_types.py", + # Error in typing caused by the typespec. + "sample_agents_with_file_search_attachment.py", + "sample_agents_with_code_interpreter_file_attachment.py", + "sample_agents_code_interpreter_attachment_enterprise_search.py", + "sample_agents_with_file_search_attachment_async.py" ] warn_unused_configs = true -follow_imports = "skip" ignore_missing_imports = true follow_imports_for_stubs = false \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py index f77fb6738625..de6edb55084b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async.py @@ -29,7 +29,7 @@ import os -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
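The `models/_patch.py` change earlier in this commit makes `_parse_event_data` return a three-tuple whose last element is a plain string, so `on_error` receives a `str` rather than the parsed payload object. A stripped-down sketch of that shape, using a hypothetical `error` event name and no SDK types:

```python
import json
from typing import Any, Optional, Tuple


def parse_event_data(event_data_str: str) -> Tuple[Optional[str], Any, str]:
    event_type: Optional[str] = None
    event_data = ""
    error_string = ""

    for line in event_data_str.strip().split("\n"):
        if line.startswith("event:"):
            event_type = line[len("event:"):].strip()
        elif line.startswith("data:"):
            event_data = line[len("data:"):].strip()

    try:
        parsed_data: Any = json.loads(event_data)
    except json.JSONDecodeError:
        parsed_data = event_data

    if event_type == "error":
        # Keep a string form so the error callback gets a str, not a dict.
        error_string = str(parsed_data)

    return event_type, parsed_data, error_string


print(parse_event_data('event: error\ndata: {"message": "boom"}'))
```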
# At the moment, it should be in the format ";;;" diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py index b205684bb8a8..2f6154027772 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_azure_monitor_tracing.py @@ -25,7 +25,6 @@ """ import asyncio import time -import sys from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import DefaultAzureCredential from azure.ai.projects.tracing.agents import AIAgentsInstrumentor @@ -38,7 +37,7 @@ @tracer.start_as_current_span(__file__) -async def main(): +async def main() -> None: # Create an Azure AI Project Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py index c9d08f754472..a0e6b26b6cad 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py @@ -41,7 +41,7 @@ @tracer.start_as_current_span(__file__) -async def main(): +async def main() -> None: # Create an Azure AI Project Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" @@ -54,7 +54,7 @@ async def main(): # Enable console tracing # or, if you have local OTLP endpoint running, change it to # project_client.telemetry.enable(destination="http://localhost:4317") - project_client.telemetry.enable(destination=sys.stdout) + await project_client.telemetry.enable(destination=sys.stdout) async with project_client: agent = await project_client.agents.create_agent( diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py index 36e7028d1f2e..ebe2bc07b102 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -31,7 +31,7 @@ import os -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
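The console-tracing fix above adds a missing `await` on `project_client.telemetry.enable(...)`. Calling an async method without awaiting it returns a coroutine object and does nothing except emit a "never awaited" RuntimeWarning; a minimal repro of the bug and the fix:

```python
import asyncio


async def enable() -> None:
    print("telemetry enabled")


async def main() -> None:
    enable()        # bug: silent no-op, RuntimeWarning: coroutine never awaited
    await enable()  # fix: the coroutine actually runs


asyncio.run(main())
```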
# At the moment, it should be in the format ";;;" diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py index d3c36363d4ba..36b63adae474 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py @@ -26,7 +26,7 @@ from azure.ai.projects.models import CodeInterpreterTool from azure.ai.projects.models import FilePurpose from azure.ai.projects.models import MessageAttachment -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential # Create an Azure AI Client from a connection string, copied from your AI Studio project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py index 5dfc0432ed0c..ce06f5119303 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py @@ -29,7 +29,7 @@ VectorStoreDataSource, VectorStoreDataSourceAssetType, ) -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential # Create an Azure AI Client from a connection string, copied from your AI Studio project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py index 42d5ea9e318e..b4fe9b63c3ff 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py @@ -24,7 +24,12 @@ import time from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction +from azure.ai.projects.models import ( + AsyncFunctionTool, + RequiredFunctionToolCall, + SubmitToolOutputsAction, + ToolOutput +) from azure.identity.aio import DefaultAzureCredential import os @@ -32,7 +37,7 @@ from user_async_functions import user_async_functions -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables @@ -86,10 +91,10 @@ async def main(): try: output = await functions.execute(tool_call) tool_outputs.append( - { - "tool_call_id": tool_call.id, - "output": output, - } + ToolOutput( + tool_call_id = tool_call.id, + output = output, + ) ) except Exception as e: print(f"Error executing tool_call {tool_call.id}: {e}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index ad3950642ea9..db888c32239b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -24,7 +24,7 @@ import os, asyncio from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import DefaultAzureCredential -from azure.ai.projects.models import AsyncFunctionTool, AsyncToolSet, CodeInterpreterTool +from azure.ai.projects.models import AsyncFunctionTool, AsyncToolSet from user_async_functions import user_async_functions @@ -33,7 +33,7 @@ # Customer needs to login to Azure subscription via Azure CLI and set the environment variables -async def main(): +async def main() -> None: project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index b7d1df7351e6..cde160ef8d23 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -63,7 +63,7 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
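The `sample_agents_functions_async.py` hunk above replaces ad-hoc dicts with typed `ToolOutput` models when answering a `SubmitToolOutputsAction`. Below is an SDK-free sketch of that execute-and-collect loop; `ToolCall`, `ToolOutput`, and `fetch_weather` are simplified stand-ins for the real `azure.ai.projects.models` types and user functions:

```python
from dataclasses import dataclass
from typing import Callable, Dict, List


@dataclass
class ToolCall:
    id: str
    name: str
    arguments: str  # JSON-encoded arguments in the real SDK


@dataclass
class ToolOutput:
    tool_call_id: str
    output: str


def fetch_weather(arguments: str) -> str:
    return '{"weather": "sunny"}'


FUNCTIONS: Dict[str, Callable[[str], str]] = {"fetch_weather": fetch_weather}


def execute_required_calls(tool_calls: List[ToolCall]) -> List[ToolOutput]:
    tool_outputs: List[ToolOutput] = []
    for tool_call in tool_calls:
        try:
            output = FUNCTIONS[tool_call.name](tool_call.arguments)
            tool_outputs.append(ToolOutput(tool_call_id=tool_call.id, output=output))
        except Exception as e:  # mirror the sample: log the failure, keep going
            print(f"Error executing tool_call {tool_call.id}: {e}")
    return tool_outputs


print(execute_required_calls([ToolCall(id="call_1", name="fetch_weather", arguments="{}")]))
```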
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py index 0359dc6e00e8..a42c83867d45 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_with_toolset_async.py @@ -26,7 +26,6 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import MessageDeltaChunk, MessageDeltaTextContent, RunStep, ThreadMessage, ThreadRun from azure.ai.projects.models import AsyncAgentEventHandler, AsyncFunctionTool, AsyncToolSet -from azure.ai.projects.aio.operations import AgentsOperations from azure.identity.aio import DefaultAzureCredential import os @@ -64,7 +63,7 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: print(f"Unhandled Event Type: {event_type}, Data: {event_data}") -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py index 691ace56eb56..f2341802df4b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_iteration_async.py @@ -30,7 +30,7 @@ import os -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index afaf45fa5bf8..273f640bcdd6 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -24,7 +24,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def main(): @@ -95,7 +95,7 @@ async def main(): print(f"Created run, run ID: {run.id}") await project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") await project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py index 680806f68682..a3fd0de9d8ee 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_file_search_async.py @@ -28,7 +28,7 @@ from azure.identity.aio import DefaultAzureCredential -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py index 16f036e1a95a..0d3e543e3cf3 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py @@ -23,7 +23,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def main(): @@ -70,7 +70,7 @@ async def main(): print(f"Created run, run ID: {run.id}") await project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") await project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py index f06151be4359..ef3f0477a6a4 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py @@ -23,7 +23,7 @@ from azure.ai.projects.aio import AIProjectClient from azure.ai.projects.models import FileSearchTool, FilePurpose -from azure.identity import DefaultAzureCredential +from azure.identity.aio import DefaultAzureCredential async def main(): @@ -72,7 +72,7 @@ async def main(): print(f"Created run, run ID: {run.id}") await project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") await project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py index 3eda61dd302a..2a1a97be333f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_with_file_search_attachment_async.py @@ -30,7 +30,7 @@ import os -async def main(): +async def main() -> None: # Create an Azure AI Client from a connection string, copied from your AI Studio project. 
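Several async-sample hunks above and below swap `from azure.identity import DefaultAzureCredential` for the `azure.identity.aio` variant: an async client needs a credential whose `get_token` is awaitable, and the wrong import only surfaces at runtime. A minimal sketch of the async credential used on its own, assuming `azure-identity` is installed and an Azure login is available:

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential  # note the .aio package


async def main() -> None:
    # Async credentials are async context managers; exiting the block
    # closes their underlying transport session.
    async with DefaultAzureCredential() as credential:
        token = await credential.get_token("https://management.azure.com/.default")
        print(token.expires_on)


asyncio.run(main())
```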
# At the moment, it should be in the format ";;;" # Customer needs to login to Azure subscription via Azure CLI and set the environment variables diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py index d06618c8e7c7..36ec4c6f4f34 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py @@ -69,7 +69,7 @@ print(f"Created run, run ID: {run.id}") project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py index ca53f180e32e..f4a17b589e28 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_azure_monitor_tracing.py @@ -23,9 +23,9 @@ * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. """ - -import os, sys, time, json from typing import Any, Callable, Set + +import os, time, json from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py index 031314cfa944..1cf7f8261e58 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions_with_console_tracing.py @@ -28,9 +28,9 @@ * AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. 
""" +from typing import Any, Callable, Set import os, sys, time, json -from typing import Any, Callable, Set from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from azure.ai.projects.models import FunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py index c033fc1df0dd..dadb7b2cdadd 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -25,9 +25,8 @@ """ -import os, sys +import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models._enums import RunStepType from azure.identity import DefaultAzureCredential from azure.ai.projects.models import ( AgentEventHandler, @@ -35,11 +34,11 @@ MessageDeltaChunk, ThreadMessage, ThreadRun, - RunStep, + RunStep ) from typing import Any from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor +from azure.monitor.opentelemetry import configure_azure_monitor # Create an Azure AI Project Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" @@ -59,7 +58,12 @@ def on_message_delta(self, delta: "MessageDeltaChunk") -> None: print(f"Text delta received: {text_value}") def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + if len(message.content): + print(f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}") + else: + print(f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}") def on_thread_run(self, run: "ThreadRun") -> None: print(f"ThreadRun status: {run.status}") @@ -67,9 +71,6 @@ def on_thread_run(self, run: "ThreadRun") -> None: def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"Message status: {message.status}, Content: {message.content[0].as_dict()}") - def on_error(self, data: str) -> None: print(f"An error occurred. Data: {data}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py index 49320a9555b8..f00781834344 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py @@ -31,7 +31,6 @@ import os, sys from azure.ai.projects import AIProjectClient -from azure.ai.projects.models._enums import RunStepType from azure.identity import DefaultAzureCredential from azure.ai.projects.models import ( AgentEventHandler, @@ -63,7 +62,12 @@ def on_message_delta(self, delta: "MessageDeltaChunk") -> None: print(f"Text delta received: {text_value}") def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + if len(message.content): + print(f"ThreadMessage created. 
ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}") + else: + print(f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}") def on_thread_run(self, run: "ThreadRun") -> None: print(f"ThreadRun status: {run.status}") @@ -71,9 +75,6 @@ def on_thread_run(self, run: "ThreadRun") -> None: def on_run_step(self, step: "RunStep") -> None: print(f"RunStep type: {step.type}, Status: {step.status}") - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"Message status: {message.status}, Content: {message.content[0].as_dict()}") - def on_error(self, data: str) -> None: print(f"An error occurred. Data: {data}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index a061bf02d7f8..c506a585a6d0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -27,10 +27,14 @@ from azure.ai.projects.models import ( AgentEventHandler, FunctionTool, + MessageDeltaChunk, MessageDeltaTextContent, RequiredFunctionToolCall, + RunStep, SubmitToolOutputsAction, - ToolOutput, + ThreadMessage, + ThreadRun, + ToolOutput ) from azure.identity import DefaultAzureCredential from user_functions import user_functions diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py index 0bbbcf3cab88..cf7d92e16e03 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_toolset.py @@ -21,10 +21,8 @@ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" -import os from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ( - Agent, MessageDeltaChunk, MessageDeltaTextContent, RunStep, @@ -32,7 +30,6 @@ ThreadRun, ) from azure.ai.projects.models import AgentEventHandler -from azure.ai.projects.operations import AgentsOperations from azure.identity import DefaultAzureCredential from azure.ai.projects.models import FunctionTool, ToolSet diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py index a019fd96daf0..eacf51437485 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py @@ -70,7 +70,7 @@ print(f"Created run, run ID: {run.id}") project_client.agents.delete_vector_store(vector_store.id) - print("Deleted vectore store") + print("Deleted vector store") project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index 2a01e1f1836f..265fc1f7a38b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -20,12 +20,13 @@ Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ - import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import FilePurpose -from azure.ai.projects.models import MessageAttachment -from azure.ai.projects.models import FileSearchTool +from azure.ai.projects.models import ( + FilePurpose, + FileSearchTool, + MessageAttachment +) from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index fb7ee382356e..afd4f442e2d1 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -23,6 +23,7 @@ in your AI Studio Hub page. 3) MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
""" +from typing import cast import asyncio import os @@ -31,7 +32,7 @@ from azure.identity.aio import DefaultAzureCredential -async def sample_connections_async(): +async def sample_connections_async() -> None: project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] connection_name = os.environ["CONNECTION_NAME"] @@ -78,6 +79,7 @@ async def sample_connections_async(): if connection.connection_type == ConnectionType.AZURE_OPEN_AI: from openai import AsyncAzureOpenAI + from azure.core.credentials_async import AsyncTokenCredential if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") @@ -93,7 +95,7 @@ async def sample_connections_async(): client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" + cast(AsyncTokenCredential, connection.token_credential), "https://cognitiveservices.azure.com/.default" ), azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs @@ -116,27 +118,28 @@ async def sample_connections_async(): from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import UserMessage + from azure.core.credentials_async import AsyncTokenCredential if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key) + inference_client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "") ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential + inference_client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=cast(AsyncTokenCredential, connection.token_credential) ) else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = await client.complete( + response = await inference_client.complete( model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] ) - await client.close() + await inference_client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 38f1397155c3..5cf2b7d0a297 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -23,14 +23,13 @@ in your AI Studio Hub page. 3) MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
""" +from typing import cast import os from azure.ai.projects import AIProjectClient from azure.ai.projects.models import ConnectionType, AuthenticationType from azure.identity import DefaultAzureCredential -# from azure.identity import DefaultAzureCredential, get_bearer_token_provider - project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] connection_name = os.environ["CONNECTION_NAME"] model_deployment_name = os.environ["MODEL_DEPLOYMENT_NAME"] @@ -86,12 +85,14 @@ ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: print("====> Creating AzureOpenAI client using Entra ID authentication") + from azure.core.credentials import TokenCredential from azure.identity import get_bearer_token_provider client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - connection.token_credential, "https://cognitiveservices.azure.com/.default" + cast(TokenCredential, connection.token_credential), + "https://cognitiveservices.azure.com/.default" ), azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs @@ -120,18 +121,18 @@ print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key)) + client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "")) elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") - client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.properties.token_credential + inference_client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=connection.token_credential ) else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = client.complete( + response = inference_client.complete( model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] ) - client.close() + inference_client.close() print(response.choices[0].message.content) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index 77b590d93267..0b72c35f469b 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -21,7 +21,6 @@ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
""" import asyncio -import time import os from azure.ai.projects.aio import AIProjectClient diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py index c322d3d3ed3f..8a3dffc1403d 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations.py @@ -21,7 +21,7 @@ PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. """ -import os, time +import os from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType @@ -82,5 +82,6 @@ print("----------------------------------------------------------------") print("Created evaluation, evaluation ID: ", get_evaluation_response.id) print("Evaluation status: ", get_evaluation_response.status) -print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) +if isinstance(get_evaluation_response.properties, dict): + print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) print("----------------------------------------------------------------") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index c78f94c37e17..d4db1e281bfd 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -4,7 +4,6 @@ from azure.ai.projects.models import ( ApplicationInsightsConfiguration, EvaluatorConfiguration, - SamplingStrategy, EvaluationSchedule, CronTrigger, RecurrenceTrigger, diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py index bb7254a594e8..7a901a20c5f6 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_ai_inference_client_and_console_tracing.py @@ -30,8 +30,8 @@ """ import os import sys -from azure.ai.projects import AIProjectClient from azure.ai.inference.models import UserMessage +from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential project_connection_string = os.environ["PROJECT_CONNECTION_STRING"] diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 093245923d15..83cc23838f51 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -1,9 +1,4 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
@@ -22,7 +17,6 @@ import functools from azure.ai.projects import AIProjectClient -from azure.core.pipeline.transport import RequestsTransport from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy from azure.ai.projects.models import ( CodeInterpreterTool, @@ -37,10 +31,10 @@ ToolResources, ToolSet, VectorStore, - VectorStoreAzureConfigurations, - VectorStorageConfiguration, - VectorStorageDataSource, - VectorStorageDataSourceAssetType, + VectorStoreConfigurations, + VectorStoreConfiguration, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, ) @@ -1212,9 +1206,9 @@ def _do_test_create_vector_store(self, **kwargs): ds = None else: ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] vector_store = ai_client.agents.create_vector_store_and_poll( @@ -1232,15 +1226,15 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): assert isinstance(ai_client, AIProjectClient) ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ - VectorStoreAzureConfigurations( - store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + VectorStoreConfigurations( + store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) ) ] ) @@ -1292,7 +1286,7 @@ def _do_test_create_vector_store_add_file(self, **kwargs): if file_id: ds = None else: - ds = [VectorStorageDataSource(storage_uri=kwargs["azure_ai_projects_data_path"], asset_type="uri_asset")] + ds = [VectorStoreDataSource(storage_uri=kwargs["azure_ai_projects_data_path"], asset_type="uri_asset")] vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id vector_store_file = ai_client.agents.create_vector_store_file( @@ -1327,9 +1321,9 @@ def _do_test_create_vector_store_batch(self, **kwargs): else: file_ids = None ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") @@ -1380,8 +1374,8 @@ def _test_file_search( @recorded_by_proxy def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -1433,8 +1427,8 @@ def _do_test_message_attachment(self, **kwargs): @recorded_by_proxy def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], 
asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -1490,8 +1484,8 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): @recorded_by_proxy def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -1552,15 +1546,15 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): assert isinstance(ai_client, AIProjectClient) ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ - VectorStoreAzureConfigurations( - store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + VectorStoreConfigurations( + store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) ) ] ) @@ -1593,8 +1587,8 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): @recorded_by_proxy def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index 24ebd1567982..a331928a03e0 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -1,16 +1,9 @@ # pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines -# pylint: disable=too-many-lines # # ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ +# cSpell:disable import datetime import functools import json @@ -35,10 +28,10 @@ ToolResources, ToolSet, VectorStore, - VectorStoreAzureConfigurations, - VectorStorageConfiguration, - VectorStorageDataSource, - VectorStorageDataSourceAssetType, + VectorStoreConfigurations, + VectorStoreConfiguration, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, ) @@ -1137,9 +1130,9 @@ async def _do_test_create_vector_store(self, **kwargs): ds = None else: ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] vector_store = await ai_client.agents.create_vector_store_and_poll( @@ -1172,9 +1165,9 @@ async def _do_test_create_vector_store_add_file(self, **kwargs): ds = None else: ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") @@ -1211,9 +1204,9 @@ async def _do_test_create_vector_store_batch(self, **kwargs): else: file_ids = None ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") @@ -1259,8 +1252,8 @@ async def _test_file_search(self, ai_client: AIProjectClient, vector_store: Vect @recorded_by_proxy_async async def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) await self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -1317,15 +1310,15 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): assert isinstance(ai_client, AIProjectClient) ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ - VectorStoreAzureConfigurations( - store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + VectorStoreConfigurations( + store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) ) ] ) @@ -1358,8 +1351,8 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -1418,8 +1411,8 
@@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): @recorded_by_proxy_async async def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -1482,15 +1475,15 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): assert isinstance(ai_client, AIProjectClient) ds = [ - VectorStorageDataSource( + VectorStoreDataSource( storage_uri=kwargs["azure_ai_projects_data_path"], - asset_type=VectorStorageDataSourceAssetType.URI_ASSET, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ - VectorStoreAzureConfigurations( - store_name="my_vector_store", store_configuration=VectorStorageConfiguration(data_sources=ds) + VectorStoreConfigurations( + store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) ) ] ) @@ -1523,8 +1516,8 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - ds = VectorStorageDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStorageDataSourceAssetType.URI_ASSET + ds = VectorStoreDataSource( + storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET ) await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py index ffad05175c55..5796e9a7fa3f 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_vector_store.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ - +# cSpell:disable import unittest from azure.ai.projects._model_base import _deserialize from azure.ai.projects.models import _models diff --git a/sdk/ai/azure-ai-projects/tests/check_sample_name.sh b/sdk/ai/azure-ai-projects/tests/check_sample_name.sh index e6a6f70dffa2..d705795e6fc8 100644 --- a/sdk/ai/azure-ai-projects/tests/check_sample_name.sh +++ b/sdk/ai/azure-ai-projects/tests/check_sample_name.sh @@ -1,6 +1,6 @@ #!/bin/bash # This is simple helper script to chreck the name of a file -# the name sgould encounter in two places: +# the name should encounter in two places: # FILE: $fname # ... 
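
The renames above are mechanical: `VectorStorage*` becomes `VectorStore*`, and `VectorStoreAzureConfigurations` becomes `VectorStoreConfigurations`. A minimal sketch of the new spelling, assuming a `project_client` (an `AIProjectClient`), an `asset_uri` pointing at an uploaded data asset like the tests' `azure_ai_projects_data_path`, and the `data_sources` keyword the tests exercise:

from azure.ai.projects.models import (
    FileSearchToolResource,
    VectorStoreConfiguration,
    VectorStoreConfigurations,
    VectorStoreDataSource,
    VectorStoreDataSourceAssetType,
)

# An Azure asset used as the vector store's data source (asset_uri is assumed).
ds = VectorStoreDataSource(
    storage_uri=asset_uri,
    asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
)

# Either create the store up front (signature assumed per the tests above)...
vector_store = project_client.agents.create_vector_store_and_poll(
    data_sources=[ds], name="sample_vector_store"
)

# ...or declare it inline on the file-search tool resource, as the tests do.
fs = FileSearchToolResource(
    vector_stores=[
        VectorStoreConfigurations(
            store_name="my_vector_store",
            store_configuration=VectorStoreConfiguration(data_sources=[ds]),
        )
    ]
)
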
# python $fname From 131554f3841c655db442f61a7ff123bd35206ab5 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:34:12 -0800 Subject: [PATCH 100/138] Run "black" (again) --- .../azure/ai/projects/aio/_patch.py | 25 +++++++++---------- .../azure/ai/projects/models/_patch.py | 2 +- .../sample_agents_functions_async.py | 11 +++----- ...eventhandler_with_azure_monitor_tracing.py | 13 +++++----- ...tream_eventhandler_with_console_tracing.py | 9 ++++--- ...ents_stream_eventhandler_with_functions.py | 2 +- ...mple_agents_with_file_search_attachment.py | 6 +---- .../async_samples/sample_connections_async.py | 3 ++- .../samples/connections/sample_connections.py | 7 +++--- 9 files changed, 36 insertions(+), 42 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index d70124530096..955c0147a8b5 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ from azure.core.credentials import TokenCredential + """Customize generated code here. Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -262,7 +263,7 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: is_anonymous=True, version="1", ) - # We have to wrap async method get_token of + # We have to wrap async method get_token of ml_client = MLClient( _SyncCredentialWrapper(self._config3.credential), @@ -288,6 +289,7 @@ def scope(self) -> Dict[str, str]: "AIProjectClient", ] # Add all objects you want publicly available to users at this package level + class _SyncCredentialWrapper(TokenCredential): """ The class, synchronizing AsyncTokenCredential. 
@@ -301,22 +303,19 @@ def __init__(self, async_credential: "AsyncTokenCredential"): def get_token( self, - *scopes:str, - claims:Optional[str]=None, - tenant_id:Optional[str]=None, - enable_cae:bool=False, ** - kwargs:Any) -> "AccessToken": - + *scopes: str, + claims: Optional[str] = None, + tenant_id: Optional[str] = None, + enable_cae: bool = False, + **kwargs: Any, + ) -> "AccessToken": + pool = concurrent.futures.ThreadPoolExecutor() return pool.submit( asyncio.run, self._async_credential.get_token( - *scopes, - claims=claims, - tenant_id=tenant_id, - enable_cae=enable_cae, - **kwargs - ) + *scopes, claims=claims, tenant_id=tenant_id, enable_cae=enable_cae, **kwargs + ), ).result() diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 6d74afc54339..53c2683eaeb4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -1208,7 +1208,7 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, return event_type, event_data_obj, error_string def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: - event_type, event_data_obj, error_string= self._parse_event_data(event_data_str) + event_type, event_data_obj, error_string = self._parse_event_data(event_data_str) if ( isinstance(event_data_obj, ThreadRun) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py index b4fe9b63c3ff..4a4b4b2222fc 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_functions_async.py @@ -24,12 +24,7 @@ import time from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import ( - AsyncFunctionTool, - RequiredFunctionToolCall, - SubmitToolOutputsAction, - ToolOutput -) +from azure.ai.projects.models import AsyncFunctionTool, RequiredFunctionToolCall, SubmitToolOutputsAction, ToolOutput from azure.identity.aio import DefaultAzureCredential import os @@ -92,8 +87,8 @@ async def main() -> None: output = await functions.execute(tool_call) tool_outputs.append( ToolOutput( - tool_call_id = tool_call.id, - output = output, + tool_call_id=tool_call.id, + output=output, ) ) except Exception as e: diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py index dadb7b2cdadd..39dbda8ed490 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_azure_monitor_tracing.py @@ -34,11 +34,11 @@ MessageDeltaChunk, ThreadMessage, ThreadRun, - RunStep + RunStep, ) from typing import Any from opentelemetry import trace -from azure.monitor.opentelemetry import configure_azure_monitor +from azure.monitor.opentelemetry import configure_azure_monitor # Create an Azure AI Project Client from a connection string, copied from your AI Studio project. 
# At the moment, it should be in the format ";;;" @@ -59,11 +59,12 @@ def on_message_delta(self, delta: "MessageDeltaChunk") -> None: def on_thread_message(self, message: "ThreadMessage") -> None: if len(message.content): - print(f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}, Content: {message.content[0].as_dict()}") + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) else: - print(f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}") + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") def on_thread_run(self, run: "ThreadRun") -> None: print(f"ThreadRun status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py index f00781834344..2e74cdbe7d2a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_console_tracing.py @@ -63,11 +63,12 @@ def on_message_delta(self, delta: "MessageDeltaChunk") -> None: def on_thread_message(self, message: "ThreadMessage") -> None: if len(message.content): - print(f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}, Content: {message.content[0].as_dict()}") + print( + f"ThreadMessage created. ID: {message.id}, " + f"Status: {message.status}, Content: {message.content[0].as_dict()}" + ) else: - print(f"ThreadMessage created. ID: {message.id}, " - f"Status: {message.status}") + print(f"ThreadMessage created. ID: {message.id}, " f"Status: {message.status}") def on_thread_run(self, run: "ThreadRun") -> None: print(f"ThreadRun status: {run.status}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py index c506a585a6d0..7f65c06aa1a2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py @@ -34,7 +34,7 @@ SubmitToolOutputsAction, ThreadMessage, ThreadRun, - ToolOutput + ToolOutput, ) from azure.identity import DefaultAzureCredential from user_functions import user_functions diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py index 265fc1f7a38b..6c179c7b2f04 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_file_search_attachment.py @@ -22,11 +22,7 @@ """ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models import ( - FilePurpose, - FileSearchTool, - MessageAttachment -) +from azure.ai.projects.models import FilePurpose, FileSearchTool, MessageAttachment from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index afd4f442e2d1..0577f79b8709 100644 --- 
a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -95,7 +95,8 @@ async def sample_connections_async() -> None: client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - cast(AsyncTokenCredential, connection.token_credential), "https://cognitiveservices.azure.com/.default" + cast(AsyncTokenCredential, connection.token_credential), + "https://cognitiveservices.azure.com/.default", ), azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 5cf2b7d0a297..3def05d90e95 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -91,8 +91,7 @@ client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( - cast(TokenCredential, connection.token_credential), - "https://cognitiveservices.azure.com/.default" + cast(TokenCredential, connection.token_credential), "https://cognitiveservices.azure.com/.default" ), azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs @@ -121,7 +120,9 @@ print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient(endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "")) + client = ChatCompletionsClient( + endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "") + ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") From 0008861fb99bc8c77b4f8e26df3e6911381435e4 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:45:21 -0800 Subject: [PATCH 101/138] Fix a few pyright errors, and address a comment in the Python PR (#38435) --- sdk/ai/azure-ai-projects/azure/ai/projects/_types.py | 1 - .../azure/ai/projects/models/_patch.py | 4 ++-- .../sample_agents_stream_eventhandler_async.py | 2 +- .../samples/agents/sample_agents_file_search.py | 2 +- .../agents/sample_agents_with_resources_in_thread.py | 2 +- .../async_samples/sample_connections_async.py | 7 ++++--- .../samples/connections/sample_connections.py | 10 +++++----- .../tests/connections/test_connections_unit_tests.py | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py index c438829bda41..b540a961b2f1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py @@ -10,7 +10,6 @@ if TYPE_CHECKING: from . 
import models as _models - from .. import models as _models AgentsApiResponseFormatOption = Union[ str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" ] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 53c2683eaeb4..8dfcd05d96bb 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -144,8 +144,8 @@ def __init__( ) self.key: Optional[str] = None if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): - self.key = connection.properties.credentials.key + if hasattr(connection.properties.credentials, "key"): # type: ignore + self.key = connection.properties.credentials.key # type: ignore self.token_credential = token_credential def to_evaluator_model_config(self, deployment_name, api_version) -> Dict[str, str]: diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py index cde160ef8d23..13e84b8cf654 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_stream_eventhandler_async.py @@ -31,7 +31,7 @@ ThreadMessage, ThreadRun, ) -from azure.ai.projects.models._patch import AsyncAgentEventHandler +from azure.ai.projects.models import AsyncAgentEventHandler from azure.identity.aio import DefaultAzureCredential import os diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py index dbbd7af862cc..e1b6e3880e58 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_file_search.py @@ -23,7 +23,7 @@ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models._patch import FileSearchTool +from azure.ai.projects.models import FileSearchTool from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py index bd00e37e6fbf..3cbaceab89a0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py @@ -23,7 +23,7 @@ import os from azure.ai.projects import AIProjectClient -from azure.ai.projects.models._patch import FileSearchTool +from azure.ai.projects.models import FileSearchTool from azure.identity import DefaultAzureCredential diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 0577f79b8709..9807df48cb06 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -83,7 +83,7 @@ async def sample_connections_async() -> None: if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") - client = AsyncAzureOpenAI( + aoai_client = 
AsyncAzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs @@ -92,7 +92,7 @@ async def sample_connections_async() -> None: print("====> Creating AzureOpenAI client using Entra ID authentication") from azure.identity.aio import get_bearer_token_provider - client = AsyncAzureOpenAI( + aoai_client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( cast(AsyncTokenCredential, connection.token_credential), @@ -104,7 +104,7 @@ async def sample_connections_async() -> None: else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = await client.chat.completions.create( + response = await aoai_client.chat.completions.create( model=model_deployment_name, messages=[ { @@ -113,6 +113,7 @@ async def sample_connections_async() -> None: }, ], ) + await aoai_client.close() print(response.choices[0].message.content) elif connection.connection_type == ConnectionType.AZURE_AI_SERVICES: diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 3def05d90e95..d4db78b2ccac 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -78,7 +78,7 @@ if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating AzureOpenAI client using API key authentication") - client = AzureOpenAI( + aoai_client = AzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version="2024-06-01", # See "Data plane - inference" row in table https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs @@ -88,7 +88,7 @@ from azure.core.credentials import TokenCredential from azure.identity import get_bearer_token_provider - client = AzureOpenAI( + aoai_client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( cast(TokenCredential, connection.token_credential), "https://cognitiveservices.azure.com/.default" @@ -99,7 +99,7 @@ else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = client.chat.completions.create( + response = aoai_client.chat.completions.create( model=model_deployment_name, messages=[ { @@ -108,7 +108,7 @@ }, ], ) - client.close() + aoai_client.close() print(response.choices[0].message.content) elif connection.connection_type == ConnectionType.SERVERLESS: @@ -120,7 +120,7 @@ print("====> Creating ChatCompletionsClient using API key authentication") from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( + inference_client = ChatCompletionsClient( endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "") ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py index 9e505e8b272f..dc28671bb667 100644 --- 
a/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_unit_tests.py @@ -11,7 +11,7 @@ from azure.core.credentials import TokenCredential, AccessToken from azure.core.exceptions import HttpResponseError from connection_test_base import ConnectionsTestBase -from azure.ai.projects.models._patch import ConnectionProperties +from azure.ai.projects.models import ConnectionProperties from azure.ai.projects.models._models import GetConnectionResponse from unittest.mock import MagicMock, patch From 9369a5d78891eaf1d0b65732074fc123a9fea1df Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Mon, 11 Nov 2024 08:31:38 -0800 Subject: [PATCH 102/138] Address some of Krista's PR comments (#38444) --- sdk/ai/azure-ai-projects/azure/ai/projects/_types.py | 6 ++---- .../azure/ai/projects/aio/operations/_patch.py | 8 ++++---- .../azure-ai-projects/azure/ai/projects/models/_patch.py | 5 +++-- .../azure/ai/projects/operations/_patch.py | 8 ++++---- sdk/ai/azure-ai-projects/setup.py | 1 + 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py index b540a961b2f1..3e102290d9dc 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_types.py @@ -10,8 +10,6 @@ if TYPE_CHECKING: from . import models as _models -AgentsApiResponseFormatOption = Union[ - str, str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat" -] +AgentsApiResponseFormatOption = Union[str, "_models.AgentsApiResponseFormatMode", "_models.AgentsApiResponseFormat"] MessageAttachmentToolDefinition = Union["_models.CodeInterpreterToolDefinition", "_models.FileSearchToolDefinition"] -AgentsApiToolChoiceOption = Union[str, str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] +AgentsApiToolChoiceOption = Union[str, "_models.AgentsApiToolChoiceOptionMode", "_models.AgentsNamedToolChoice"] diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 0fae5f10bbed..0db4b79b04a4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -125,11 +125,12 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" ) client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" ) - client = ChatCompletionsClient(endpoint=endpoint, credential=connection.token_credential) + raise ValueError( + "Getting chat completions client from a connection with SAS authentication is not yet supported" + ) else: raise ValueError("Unknown authentication type") @@ -195,11 +196,10 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": ) client = EmbeddingsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) - client = EmbeddingsClient(endpoint=endpoint, credential=connection.token_credential) + raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") else: raise ValueError("Unknown authentication type") diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 8dfcd05d96bb..5c73ea87037c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -144,8 +144,8 @@ def __init__( ) self.key: Optional[str] = None if hasattr(connection.properties, "credentials"): - if hasattr(connection.properties.credentials, "key"): # type: ignore - self.key = connection.properties.credentials.key # type: ignore + if hasattr(connection.properties.credentials, "key"): # type: ignore + self.key = connection.properties.credentials.key # type: ignore self.token_credential = token_credential def to_evaluator_model_config(self, deployment_name, api_version) -> Dict[str, str]: @@ -189,6 +189,7 @@ def __str__(self): return out +# TODO: Look into adding an async version of this class class SASTokenCredential(TokenCredential): def __init__( self, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 6214d21e3bc3..63f1596725aa 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -114,11 +114,12 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": ) client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. logger.debug( "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" ) - client = ChatCompletionsClient(endpoint=endpoint, credential=connection.token_credential) + raise ValueError( + "Getting chat completions client from a connection with SAS authentication is not yet supported" + ) else: raise ValueError("Unknown authentication type") @@ -184,11 +185,10 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": ) client = EmbeddingsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: - # TODO - Not yet supported by the service. Expected 9/27. 
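
With SAS support removed, the dispatch in these client factories has three outcomes per connection: API key, Entra ID, or a `ValueError`. Callers resolving connections themselves follow the same shape; a condensed sketch, assuming `connection` was fetched with its credentials populated (as in the connections samples):

from azure.ai.inference import ChatCompletionsClient
from azure.ai.projects.models import AuthenticationType
from azure.core.credentials import AzureKeyCredential

if connection.authentication_type == AuthenticationType.API_KEY:
    # Key-based auth, mirroring the samples' use of connection.key.
    client = ChatCompletionsClient(
        endpoint=connection.endpoint_url,
        credential=AzureKeyCredential(connection.key or ""),
    )
elif connection.authentication_type == AuthenticationType.ENTRA_ID:
    client = ChatCompletionsClient(
        endpoint=connection.endpoint_url, credential=connection.token_credential
    )
else:  # AuthenticationType.SAS, mirroring the new behavior above
    raise ValueError(
        "Getting chat completions client from a connection with SAS authentication is not yet supported"
    )
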
logger.debug( "[InferenceOperations.get_embeddings_client] Creating EmbeddingsClient using SAS authentication" ) - client = EmbeddingsClient(endpoint=endpoint, credential=connection.token_credential) + raise ValueError("Getting embeddings client from a connection with SAS authentication is not yet supported") else: raise ValueError("Unknown authentication type") diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index a3190a395e7d..2f225140b1e5 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -47,6 +47,7 @@ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "License :: OSI Approved :: MIT License", ], zip_safe=False, From 2a7d1eb43f08f8a23423444df4b3b60e26b1a225 Mon Sep 17 00:00:00 2001 From: Sai Kothinti Date: Mon, 11 Nov 2024 22:24:20 +0530 Subject: [PATCH 103/138] sdk changes for name change for provisioning state and isEnabled online evaluation (#38298) * sdk changes for name change for provisioning state and isEnabled online evaluation * add correct tsp-location.yaml --- .../azure/ai/projects/models/_models.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 9bd921251209..e1150d4e56aa 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -879,13 +879,16 @@ class EvaluationSchedule(_model_base.Model): :vartype description: str :ivar system_data: Metadata containing createdBy and modifiedBy information. :vartype system_data: ~azure.ai.projects.models.SystemData - :ivar provisioning_status: Status of the evaluation. It is set by service and is read-only. - :vartype provisioning_status: str + :ivar provisioning_state: Provisioning State of the evaluation. It is set by service and is + read-only. + :vartype provisioning_state: str :ivar tags: Evaluation's tags. Unlike properties, tags are fully mutable. :vartype tags: dict[str, str] :ivar properties: Evaluation's properties. Unlike tags, properties are add-only. Once added, a property cannot be removed. :vartype properties: dict[str, str] + :ivar is_enabled: Enabled status of the evaluation. It is set by service and is read-only. + :vartype is_enabled: str :ivar evaluators: Evaluators to be used for the evaluation. Required. :vartype evaluators: dict[str, ~azure.ai.projects.models.EvaluatorConfiguration] :ivar trigger: Trigger for the evaluation. Required. @@ -901,13 +904,15 @@ class EvaluationSchedule(_model_base.Model): evaluation and is mutable.""" system_data: Optional["_models.SystemData"] = rest_field(name="systemData", visibility=["read"]) """Metadata containing createdBy and modifiedBy information.""" - provisioning_status: Optional[str] = rest_field(name="provisioningStatus", visibility=["read"]) - """Status of the evaluation. It is set by service and is read-only.""" + provisioning_state: Optional[str] = rest_field(name="provisioningState", visibility=["read"]) + """Provisioning State of the evaluation. It is set by service and is read-only.""" tags: Optional[Dict[str, str]] = rest_field() """Evaluation's tags. Unlike properties, tags are fully mutable.""" properties: Optional[Dict[str, str]] = rest_field(visibility=["read", "create"]) """Evaluation's properties. 
Unlike tags, properties are add-only. Once added, a property cannot be removed.""" + is_enabled: Optional[str] = rest_field(name="isEnabled", visibility=["read"]) + """Enabled status of the evaluation. It is set by service and is read-only.""" evaluators: Dict[str, "_models.EvaluatorConfiguration"] = rest_field(visibility=["read", "create"]) """Evaluators to be used for the evaluation. Required.""" trigger: "_models.Trigger" = rest_field() From b31889fd546d0c1cc8ba3c822ca92fb6483a22b2 Mon Sep 17 00:00:00 2001 From: Liudmila Molkova Date: Mon, 11 Nov 2024 09:00:03 -0800 Subject: [PATCH 104/138] azure-ai-projects: configure otel logging in telemetry.enable() helper (#38434) * Configure otel events in projects.telemetry.enable() --- .../ai/projects/aio/operations/_patch.py | 11 +- .../azure/ai/projects/operations/_patch.py | 157 +++++++++++++----- ...openai_client_and_azure_monitor_tracing.py | 2 +- ...azure_openai_client_and_console_tracing.py | 2 +- 4 files changed, 128 insertions(+), 44 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 0db4b79b04a4..ab952981ea1f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -415,7 +415,8 @@ async def get_connection_string(self) -> str: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry async def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: - """Enables telemetry collection with OpenTelemetry for Azure AI clients and popular GenAI libraries. + """Enables distributed tracing and logging with OpenTelemetry for Azure AI clients and + popular GenAI libraries. Following instrumentations are enabled (when corresponding packages are installed): @@ -431,11 +432,11 @@ async def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs stdout or OTLP (OpenTelemetry protocol) gRPC endpoint. It's recommended for local development only. For production use, make sure to configure OpenTelemetry SDK directly. - :keyword destination: Recommended for local testing only. Set it to `sys.stdout` for - tracing to console output, or a string holding the OpenTelemetry protocol (OTLP) - endpoint such as "http://localhost:4317. + :keyword destination: Recommended for local testing only. Set it to `sys.stdout` to print + traces and logs to console output, or a string holding the OpenTelemetry protocol (OTLP) + endpoint such as "http://localhost:4317". If not provided, the method enables instrumentations, but does not configure OpenTelemetry - SDK to export traces. + SDK to export traces and logs. 
:paramtype destination: Union[TextIO, str, None] """ _enable_telemetry(destination=destination, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 63f1596725aa..3dd25e7c64d4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -365,60 +365,143 @@ def list( return connection_properties_list -# Internal helper function to enable tracing, used by both sync and async clients -def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: - """Enable tracing to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. - - :keyword destination: `sys.stdout` for tracing to console output, or a string holding the - OpenTelemetry protocol (OTLP) endpoint. - If not provided, this method enables instrumentation, but does not configure OpenTelemetry - SDK to export traces. - :paramtype destination: Union[TextIO, str, None] - """ +# Internal helper functions to enable OpenTelemetry, used by both sync and async clients +def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: if isinstance(destination, str): # `destination` is the OTLP endpoint # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage - try: - from opentelemetry import trace - from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import SimpleSpanProcessor - except ModuleNotFoundError as _: - raise ModuleNotFoundError( - "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) try: from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore except ModuleNotFoundError as _: raise ModuleNotFoundError( "OpenTelemetry OTLP exporter is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" ) - trace.set_tracer_provider(TracerProvider()) - # get_tracer_provider returns opentelemetry.trace.TracerProvider - # however, we have opentelemetry.sdk.trace.TracerProvider, which implements - # add_span_processor method, though we need to cast it to fix type checking. - tp = cast(TracerProvider, trace.get_tracer_provider()) - tp.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint=destination))) - - elif isinstance(destination, io.TextIOWrapper): + return OTLPSpanExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): if destination is sys.stdout: # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter try: - from opentelemetry import trace - from opentelemetry.sdk.trace import TracerProvider - from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter + from opentelemetry.sdk.trace.export import ConsoleSpanExporter except ModuleNotFoundError as _: raise ModuleNotFoundError( "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" ) - trace.set_tracer_provider(TracerProvider()) - # get_tracer_provider returns opentelemetry.trace.TracerProvider - # however, we have opentelemetry.sdk.trace.TracerProvider, which implements - # add_span_processor method, though we need to cast it to fix type checking. 
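
Per the updated docstring, `enable()` now wires up both traces and logs. A minimal local-development usage sketch, assuming an existing `project_client` and the OpenTelemetry SDK packages the docstring calls out:

import sys

# Print spans and log records to the console (recommended for local testing only).
project_client.telemetry.enable(destination=sys.stdout)

# Or export over OTLP/gRPC to a collector (hypothetical local endpoint):
# project_client.telemetry.enable(destination="http://localhost:4317")
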
- tp = cast(TracerProvider, trace.get_tracer_provider()) - tp.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter())) + + return ConsoleSpanExporter() + else: + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + + return None + +def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: + if isinstance(destination, str): + # `destination` is the OTLP endpoint + # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage + try: + # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. + # So it's ok to use it for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side. + from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore + except Exception as ex: + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning( + "Failed to configure OpenTelemetry logging.", exc_info=ex ) + return None + + return OTLPLogExporter(endpoint=destination) + + if isinstance(destination, io.TextIOWrapper): + if destination is sys.stdout: + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter + try: + from opentelemetry.sdk._logs.export import ConsoleLogExporter + except ModuleNotFoundError as ex: + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning( + "Failed to configure OpenTelemetry logging.", exc_info=ex + ) + + return ConsoleLogExporter() else: raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + return None + +def _configure_tracing(span_exporter: Any) -> None: + if span_exporter is None: + return + + try: + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import SimpleSpanProcessor + except ModuleNotFoundError as _: + raise ModuleNotFoundError( + "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" + ) + + # if tracing was not setup before, we need to create a new TracerProvider + if not isinstance(trace.get_tracer_provider(), TracerProvider): + # If the provider is NoOpTracerProvider, we need to create a new TracerProvider + provider = TracerProvider() + trace.set_tracer_provider(provider) + + # get_tracer_provider returns opentelemetry.trace.TracerProvider + # however, we have opentelemetry.sdk.trace.TracerProvider, which implements + # add_span_processor method, though we need to cast it to fix type checking. + provider = cast(TracerProvider, trace.get_tracer_provider()) + provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + +def _configure_logging(log_exporter: Any) -> None: + if log_exporter is None: + return + + try: + # _events and _logs are considered beta (not internal) in + # OpenTelemetry Python API/SDK. + # So it's ok to use them for local development, but we'll swallow + # any errors in case of any breaking changes on OTel side.
+ from opentelemetry import _logs, _events + from opentelemetry.sdk._logs import LoggerProvider + from opentelemetry.sdk._events import EventLoggerProvider + from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor + + if not isinstance(_logs.get_logger_provider(), LoggerProvider): + logger_provider = LoggerProvider() + _logs.set_logger_provider(logger_provider) + + # get_logger_provider returns opentelemetry._logs.LoggerProvider + # however, we have opentelemetry.sdk._logs.LoggerProvider, which implements + # add_log_record_processor method, though we need to cast it to fix type checking. + logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) + logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) + _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) + except Exception as ex: + # since OTel logging is still in beta in Python, we're going to swallow any errors + # and just warn about them. + logger.warning( + "Failed to configure OpenTelemetry logging.", exc_info=ex + ) + +def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: + """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. + + :keyword destination: `sys.stdout` to print telemetry to console or a string holding the + OpenTelemetry protocol (OTLP) endpoint. + If not provided, this method enables instrumentation, but does not configure OpenTelemetry + SDK to export traces and logs. + :paramtype destination: Union[TextIO, str, None] + """ + span_exporter = _get_trace_exporter(destination) + _configure_tracing(span_exporter) + + log_exporter = _get_log_exporter(destination) + _configure_logging(log_exporter) + # Silently try to load a set of relevant Instrumentors try: from azure.core.settings import settings @@ -447,7 +530,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: if not instrumentor.is_instrumented(): instrumentor.instrument() except Exception as exc: - logger.warning("Could not call `AIAgentsInstrumentor().instrument()` " + str(exc)) + logger.warning("Could not call `AIAgentsInstrumentor().instrument()`", exc_info=exc) try: from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor # type: ignore @@ -455,7 +538,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: OpenAIInstrumentor().instrument() except ModuleNotFoundError as _: logger.warning( - "Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai` is not installed" + "Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai-v2` is not installed" ) try: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py index 431344daec56..b2ec67a625f1 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_azure_monitor_tracing.py @@ -20,7 +20,7 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. 
- * OTEL_INSTRUMENTATION_OPENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat + * OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: diff --git a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py index a2b2b55af982..9d77ee25d79a 100644 --- a/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/inference/sample_chat_completions_with_azure_openai_client_and_console_tracing.py @@ -25,7 +25,7 @@ Set these environment variables with your own values: * PROJECT_CONNECTION_STRING - The Azure AI Project connection string, as found in your AI Studio Project. * MODEL_DEPLOYMENT_NAME - The model deployment name, as found in your AI Studio Project. - * OTEL_INSTRUMENTATION_OPENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat + * OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT - Optional. Set to `true` to trace the content of chat messages, which may contain personal data. False by default. Update the Azure OpenAI api-version as needed (see `api_version=` below). Values can be found here: From a204c170b518b557295f082ad9b1d14cf4e0fc81 Mon Sep 17 00:00:00 2001 From: kdestin <101366538+kdestin@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:57:32 -0500 Subject: [PATCH 105/138] [ai] fix(pylint): Resolve `W1309`, `W0107`, `W0707`, `R1705`, `W0401`, `R1714`, `W0611`, `W0404`, `W0201`, `W1201` and `W1203` (#38447) * style: Run isort * fix(pylint): Resolve W1309(f-string-without-interpolation) From sdk/ai/azure-ai-projects, run: ruff check --select=F541 azure --fix * fix(pylint): Resolve W0107(unnecessary-pass) From sdk/ai/azure-ai-projects, run ruff check --select=PIE790 --fix azure * fix(pylint): Resolve W0707(raise-missing-from) * fix(pylint): Resolve R1705(no-else-return) From azure/ai/azure-ai-projects, run ruff check --select=RET505 --fix azure * fix(pylint): Resolve W0401(wildcard-import) * fix(pylint): Resolve R1714(consider-using-in) * fix(pylint): Resolve W0611(unused-import) * fix(pylint): Resolve W0404(reimported) * fix(pylint): Resolve W0201(attribute-defined-outside-init) * fix(pylint): Resolve W1201(logging-not-lazy) and W1203(logging-fstring-interpolation) --- .../azure/ai/projects/_patch.py | 17 ++- .../azure/ai/projects/aio/_patch.py | 21 +-- .../ai/projects/aio/operations/_patch.py | 89 ++++++----- .../azure/ai/projects/models/_patch.py | 143 ++++++++---------- .../azure/ai/projects/operations/_patch.py | 106 ++++++------- .../agents/_ai_agents_instrumentor.py | 87 ++++++----- .../ai/projects/telemetry/agents/_utils.py | 6 +- sdk/ai/azure-ai-projects/pyproject.toml | 15 +- 8 files changed, 247 insertions(+), 237 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 7acec5304675..f045c051ce7f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -9,15 +9,18 @@ import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict, Tuple +from 
typing import Any, Dict, List, Tuple, Union + from typing_extensions import Self -from azure.core.credentials import TokenCredential + from azure.core import PipelineClient +from azure.core.credentials import TokenCredential from azure.core.pipeline import policies + +from ._client import AIProjectClient as ClientGenerated from ._configuration import AIProjectClientConfiguration from ._serialization import Deserializer, Serializer from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations -from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations @@ -56,7 +59,7 @@ def __init__( # For getting AppInsights connection string from the AppInsights resource. # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} - _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long + _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, @@ -241,12 +244,12 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: """ try: from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.entities import Data # type: ignore from azure.ai.ml.constants import AssetTypes # type: ignore - except ImportError: + from azure.ai.ml.entities import Data # type: ignore + except ImportError as e: raise ImportError( "azure-ai-ml must be installed to use this function. Please install it using `pip install azure-ai-ml`" - ) + ) from e data = Data( path=str(file_path), diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 955c0147a8b5..61fc91d2fbb7 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -2,8 +2,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -from azure.core.credentials import TokenCredential - """Customize generated code here. 
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize @@ -13,20 +11,23 @@ import uuid from os import PathLike from pathlib import Path -from typing import List, Any, Union, Dict, Optional, Tuple, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union + +from typing_extensions import Self + from azure.core import AsyncPipelineClient +from azure.core.credentials import TokenCredential from azure.core.pipeline import policies -from typing_extensions import Self from .._serialization import Deserializer, Serializer +from ._client import AIProjectClient as ClientGenerated from ._configuration import AIProjectClientConfiguration from .operations import AgentsOperations, ConnectionsOperations, EvaluationsOperations, TelemetryOperations -from ._client import AIProjectClient as ClientGenerated from .operations._patch import InferenceOperations if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential from azure.core.credentials import AccessToken + from azure.core.credentials_async import AsyncTokenCredential class AIProjectClient(ClientGenerated): @@ -64,7 +65,7 @@ def __init__( # For getting AppInsights connection string from the AppInsights resource. # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} - _endpoint0 = f"https://management.azure.com" # pylint: disable=line-too-long + _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, @@ -249,12 +250,12 @@ def upload_file(self, file_path: Union[Path, str, PathLike]) -> Tuple[str, str]: """ try: from azure.ai.ml import MLClient # type: ignore - from azure.ai.ml.entities import Data # type: ignore from azure.ai.ml.constants import AssetTypes # type: ignore - except ImportError: + from azure.ai.ml.entities import Data # type: ignore + except ImportError as e: raise ImportError( "azure-ai-ml must be installed to use this function. 
Please install it using `pip install azure-ai-ml`" - ) + ) from e data = Data( path=str(file_path), diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index ab952981ea1f..47543c19d484 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -7,52 +7,53 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -from ..._vendor import FileType -import io, asyncio +import asyncio +import io import logging import os import time from pathlib import Path -from azure.core.exceptions import ResourceNotFoundError from typing import ( IO, + TYPE_CHECKING, Any, AsyncIterator, Dict, List, MutableMapping, Optional, + Sequence, + TextIO, Union, cast, overload, - Sequence, - TYPE_CHECKING, - TextIO, ) -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import AgentsOperations as AgentsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated -from ...models._patch import ConnectionProperties +from azure.core.exceptions import ResourceNotFoundError +from azure.core.tracing.decorator_async import distributed_trace_async + +from ... import models as _models +from ..._vendor import FileType from ...models._enums import AuthenticationType, ConnectionType, FilePurpose from ...models._models import ( - GetConnectionResponse, - ListConnectionsResponse, GetAppInsightsResponse, + GetConnectionResponse, GetWorkspaceResponse, InternalConnectionPropertiesSASAuth, + ListConnectionsResponse, ) -from ... import models as _models +from ...models._patch import ConnectionProperties from ...operations._patch import _enable_telemetry -from azure.core.tracing.decorator_async import distributed_trace_async - +from ._operations import AgentsOperations as AgentsOperationsGenerated +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated +from ._operations import TelemetryOperations as TelemetryOperationsGenerated if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from azure.ai.projects import _types - from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient from openai import AsyncAzureOpenAI - from azure.identity.aio import get_bearer_token_provider + + from azure.ai.inference.aio import ChatCompletionsClient, EmbeddingsClient + from azure.ai.projects import _types logger = logging.getLogger(__name__) @@ -98,10 +99,10 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" try: from azure.ai.inference.aio import ChatCompletionsClient - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) + ) from e if use_serverless_connection: endpoint = connection.endpoint_url @@ -169,10 +170,10 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": try: from azure.ai.inference.aio import EmbeddingsClient - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "Azure AI Inference SDK is not installed. 
Please install it using 'pip install azure-ai-inference'" - ) + ) from e if use_serverless_connection: endpoint = connection.endpoint_url @@ -232,8 +233,10 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** try: from openai import AsyncAzureOpenAI - except ModuleNotFoundError as _: - raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai-async'") + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenAI SDK is not installed. Please install it using 'pip install openai-async'" + ) from e if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( @@ -242,22 +245,18 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** client = AsyncAzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) - elif ( - connection.authentication_type == AuthenticationType.ENTRA_ID - or connection.authentication_type == AuthenticationType.SAS - ): - + elif connection.authentication_type in {AuthenticationType.ENTRA_ID, AuthenticationType.SAS}: try: from azure.identity.aio import get_bearer_token_provider - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "azure.identity package not installed. Please install it using 'pip install azure-identity'" - ) + ) from e if connection.authentication_type == AuthenticationType.ENTRA_ID: auth = "Creating AzureOpenAI using Entra ID authentication" else: auth = "Creating AzureOpenAI using SAS authentication" - logger.debug(f"[InferenceOperations.get_azure_openai_client] {auth}") + logger.debug("[InferenceOperations.get_azure_openai_client] %s", auth) client = AsyncAzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( @@ -302,8 +301,7 @@ async def get_default( return await self.get( connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs ) - else: - return connection_properties_list[0] + return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace_async @@ -330,7 +328,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k ) if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) - elif connection.properties.auth_type == AuthenticationType.SAS: + if connection.properties.auth_type == AuthenticationType.SAS: from ...models._patch import SASTokenCredential cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) @@ -346,9 +344,8 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k return ConnectionProperties(connection=connection, token_credential=token_credential) return ConnectionProperties(connection=connection) - else: - connection = await self._get_connection(connection_name=connection_name, **kwargs) - return ConnectionProperties(connection=connection) + connection = await self._get_connection(connection_name=connection_name, **kwargs) + return ConnectionProperties(connection=connection) @distributed_trace_async async def list( @@ -444,6 +441,10 @@ async def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs class AgentsOperations(AgentsOperationsGenerated): + def __init__(self, *args, 
**kwargs) -> None: + super().__init__(*args, **kwargs) + self._toolset: Optional[_models.AsyncToolSet] = None + @overload async def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -951,9 +952,7 @@ def _get_toolset(self) -> Optional[_models.AsyncToolSet]: :return: The toolset for the agent. If not set, returns None. :rtype: ~azure.ai.projects.models.AsyncToolSet """ - if hasattr(self, "_toolset"): - return self._toolset - return None + return self._toolset @overload async def create_run( @@ -1880,7 +1879,7 @@ async def _handle_submit_tool_outputs( logger.warning("Toolset is not available in the client.") return - logger.info(f"Tool outputs: {tool_outputs}") + logger.info("Tool outputs: %s", tool_outputs) if tool_outputs: async with await self.submit_tool_outputs_to_stream( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler @@ -1981,7 +1980,7 @@ async def upload_file( return await super().upload_file(file=file_content, purpose=purpose, **kwargs) except IOError as e: - raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") + raise IOError(f"Unable to read file: {file_path}.") from e raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") @@ -2636,10 +2635,10 @@ def write_file(collected_chunks: list): loop = asyncio.get_running_loop() await loop.run_in_executor(None, write_file, chunks) - logger.debug(f"File '{sanitized_file_name}' saved successfully at '{target_file_path}'.") + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error(f"An error occurred in save_file: {e}") + logger.error("An error occurred in save_file: %s", e) raise diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 5c73ea87037c..cb155bc4f37e 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -7,67 +7,66 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ +import asyncio +import base64 import datetime import inspect import json import logging import math -import base64 -import asyncio import re +from abc import ABC, abstractmethod +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + Type, + Union, + cast, + get_args, + get_origin, +) -from azure.core.credentials import TokenCredential, AccessToken +from azure.core.credentials import AccessToken, TokenCredential from azure.core.credentials_async import AsyncTokenCredential from ._enums import AgentStreamEvent, ConnectionType from ._models import ( - GetConnectionResponse, - MessageDeltaChunk, - SubmitToolOutputsAction, - ThreadRun, - RunStep, - RunStepDeltaChunk, - FunctionToolDefinition, - FunctionDefinition, - ToolDefinition, - ToolResources, - FileSearchToolDefinition, - FileSearchToolResource, - BingGroundingToolDefinition, - SharepointToolDefinition, - ToolConnection, - ToolConnectionList, AzureAISearchResource, - IndexResource, AzureAISearchToolDefinition, + BingGroundingToolDefinition, CodeInterpreterToolDefinition, CodeInterpreterToolResource, - RequiredFunctionToolCall, - OpenAIPageableListOfThreadMessage, - ThreadMessage, - MessageTextContent, + FileSearchToolDefinition, + 
FileSearchToolResource, + FunctionDefinition, + FunctionToolDefinition, + GetConnectionResponse, + IndexResource, + MessageDeltaChunk, MessageImageFileContent, + MessageTextContent, MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation, -) - -from abc import ABC, abstractmethod -from typing import ( - AsyncIterator, - Awaitable, - Callable, - List, - Dict, - Any, - Type, - Optional, - Iterator, - Tuple, - Set, - get_origin, - cast, - get_args, - Union, + OpenAIPageableListOfThreadMessage, + RequiredFunctionToolCall, + RunStep, + RunStepDeltaChunk, + SharepointToolDefinition, + SubmitToolOutputsAction, + ThreadMessage, + ThreadRun, + ToolConnection, + ToolConnectionList, + ToolDefinition, + ToolResources, ) logger = logging.getLogger(__name__) @@ -178,13 +177,13 @@ def __str__(self): out += f' "connection_type": "{self.connection_type}",\n' out += f' "endpoint_url": "{self.endpoint_url}",\n' if self.key: - out += f' "key": "REDACTED"\n' + out += ' "key": "REDACTED"\n' else: - out += f' "key": null\n' + out += ' "key": null\n' if self.token_credential: - out += f' "token_credential": "REDACTED"\n' + out += ' "token_credential": "REDACTED"\n' else: - out += f' "token_credential": null\n' + out += ' "token_credential": null\n' out += "}\n" return out @@ -290,9 +289,9 @@ def _map_type(annotation) -> Dict[str, Any]: args = get_args(annotation) item_type = args[0] if args else str return {"type": "array", "items": _map_type(item_type)} - elif origin in {dict, Dict}: + if origin in {dict, Dict}: return {"type": "object"} - elif origin is Union: + if origin is Union: args = get_args(annotation) # If Union contains None, it is an optional parameter if type(None) in args: @@ -310,7 +309,7 @@ def _map_type(annotation) -> Dict[str, Any]: return schema # If Union contains multiple types, it is a oneOf parameter return {"oneOf": [_map_type(arg) for arg in args]} - elif isinstance(annotation, type): + if isinstance(annotation, type): schema_type = type_map.get(annotation.__name__, "string") return {"type": schema_type} @@ -334,13 +333,11 @@ class Tool(ABC): @abstractmethod def definitions(self) -> List[ToolDefinition]: """Get the tool definitions.""" - pass @property @abstractmethod def resources(self) -> ToolResources: """Get the tool resources.""" - pass @abstractmethod def execute(self, tool_call: Any) -> Any: @@ -350,7 +347,6 @@ def execute(self, tool_call: Any) -> Any: :param tool_call: The tool call to execute. :return: The output of the tool operations. 
""" - pass class BaseFunctionTool(Tool): @@ -430,7 +426,7 @@ def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, arguments = tool_call.function.arguments if function_name not in self._functions: - logging.error(f"Function '{function_name}' not found.") + logging.error("Function '%s' not found.", function_name) raise ValueError(f"Function '{function_name}' not found.") function = self._functions[function_name] @@ -438,11 +434,11 @@ def _get_func_and_args(self, tool_call: RequiredFunctionToolCall) -> Tuple[Any, try: parsed_arguments = json.loads(arguments) except json.JSONDecodeError as e: - logging.error(f"Invalid JSON arguments for function '{function_name}': {e}") + logging.error("Invalid JSON arguments for function '%s': %s", function_name, e) raise ValueError(f"Invalid JSON arguments: {e}") from e if not isinstance(parsed_arguments, dict): - logging.error(f"Arguments must be a JSON object for function '{function_name}'.") + logging.error("Arguments must be a JSON object for function '%s'.", function_name) raise TypeError("Arguments must be a JSON object.") return function, parsed_arguments @@ -488,8 +484,7 @@ async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: try: if inspect.iscoroutinefunction(function): return await function(**parsed_arguments) if parsed_arguments else await function() - else: - return function(**parsed_arguments) if parsed_arguments else function() + return function(**parsed_arguments) if parsed_arguments else function() except TypeError as e: error_message = f"Error executing function '{tool_call.function.name}': {e}" logging.error(error_message) @@ -730,7 +725,7 @@ def remove(self, tool_type: Type[Tool]) -> None: for i, tool in enumerate(self._tools): if isinstance(tool, tool_type): del self._tools[i] - logging.info(f"Tool of type {tool_type.__name__} removed from the ToolSet.") + logging.info("Tool of type %s removed from the ToolSet.", tool_type.__name__) return raise ValueError(f"Tool of type {tool_type.__name__} not found in the ToolSet.") @@ -767,7 +762,7 @@ def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolRes try: return ToolResources(**resources) except TypeError as e: - logging.error(f"Error creating ToolResources: {e}") + logging.error("Error creating ToolResources: %s", e) raise ValueError("Invalid resources for ToolResources.") from e def get_definitions_and_resources(self) -> Dict[str, Any]: @@ -832,7 +827,7 @@ def execute_tool_calls(self, tool_calls: List[Any]) -> Any: } tool_outputs.append(tool_output) except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") + logging.error("Failed to execute tool call %s: %s", tool_call, e) return tool_outputs @@ -874,7 +869,7 @@ async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: } tool_outputs.append(tool_output) except Exception as e: - logging.error(f"Failed to execute tool call {tool_call}: {e}") + logging.error("Failed to execute tool call %s: %s", tool_call, e) return tool_outputs @@ -883,70 +878,54 @@ class AgentEventHandler: def on_message_delta(self, delta: "MessageDeltaChunk") -> None: """Handle message delta events.""" - pass def on_thread_message(self, message: "ThreadMessage") -> None: """Handle thread message events.""" - pass def on_thread_run(self, run: "ThreadRun") -> None: """Handle thread run events.""" - pass def on_run_step(self, step: "RunStep") -> None: """Handle run step events.""" - pass def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: """Handle run step 
delta events.""" - pass def on_error(self, data: str) -> None: """Handle error events.""" - pass def on_done(self) -> None: """Handle the completion of the stream.""" - pass def on_unhandled_event(self, event_type: str, event_data: Any) -> None: """Handle any unhandled event types.""" - pass class AsyncAgentEventHandler: async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: """Handle message delta events.""" - pass async def on_thread_message(self, message: "ThreadMessage") -> None: """Handle thread message events.""" - pass async def on_thread_run(self, run: "ThreadRun") -> None: """Handle thread run events.""" - pass async def on_run_step(self, step: "RunStep") -> None: """Handle run step events.""" - pass async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: """Handle run step delta events.""" - pass async def on_error(self, data: str) -> None: """Handle error events.""" - pass async def on_done(self) -> None: """Handle the completion of the stream.""" - pass async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: """Handle any unhandled event types.""" - pass StreamEventData = Union[MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep, None] @@ -988,7 +967,7 @@ async def __anext__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer, "" if event_data_str: return await self._process_event(event_data_str) - raise StopAsyncIteration + raise while "\n\n" in self.buffer: event_data_str, self.buffer = self.buffer.split("\n\n", 1) @@ -1088,7 +1067,7 @@ async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventDat else: await self.event_handler.on_unhandled_event(event_type, event_data_obj) except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") + logging.error("Error in event handler for event '%s': %s", event_type, e) return event_type, event_data_obj @@ -1139,7 +1118,7 @@ def __next__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer, "" if event_data_str: return self._process_event(event_data_str) - raise StopIteration + raise while "\n\n" in self.buffer: event_data_str, self.buffer = self.buffer.split("\n\n", 1) @@ -1237,7 +1216,7 @@ def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: else: self.event_handler.on_unhandled_event(event_type, event_data_obj) except Exception as e: - logging.error(f"Error in event handler for event '{event_type}': {e}") + logging.error("Error in event handler for event '%s': %s", event_type, e) return event_type, event_data_obj diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 3dd25e7c64d4..144be9b900b0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -7,29 +7,31 @@ Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize """ -import sys, io, logging, os, time -from azure.core.exceptions import ResourceNotFoundError -from typing import List, Union, IO, Any, Dict, Optional, overload, Sequence, TYPE_CHECKING, Iterator, TextIO, cast +import io +import logging +import os +import sys +import time from pathlib import Path +from typing import IO, TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Sequence, TextIO, Union, cast, overload -from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated -from ._operations import 
AgentsOperations as AgentsOperationsGenerated -from ._operations import TelemetryOperations as TelemetryOperationsGenerated -from ..models._enums import AuthenticationType, ConnectionType +from azure.core.exceptions import ResourceNotFoundError +from azure.core.tracing.decorator import distributed_trace + +from .. import models as _models +from .._vendor import FileType +from ..models._enums import AuthenticationType, ConnectionType, FilePurpose from ..models._models import ( - GetConnectionResponse, - ListConnectionsResponse, GetAppInsightsResponse, + GetConnectionResponse, GetWorkspaceResponse, InternalConnectionPropertiesSASAuth, + ListConnectionsResponse, ) - from ..models._patch import ConnectionProperties -from ..models._enums import FilePurpose -from .._vendor import FileType -from .. import models as _models - -from azure.core.tracing.decorator import distributed_trace +from ._operations import AgentsOperations as AgentsOperationsGenerated +from ._operations import ConnectionsOperations as ConnectionsOperationsGenerated +from ._operations import TelemetryOperations as TelemetryOperationsGenerated if sys.version_info >= (3, 9): from collections.abc import MutableMapping @@ -38,10 +40,11 @@ if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports - from .. import _types - from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient from openai import AzureOpenAI - from azure.identity import get_bearer_token_provider + + from azure.ai.inference import ChatCompletionsClient, EmbeddingsClient + + from .. import _types JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object _Unset: Any = object() @@ -87,10 +90,10 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": try: from azure.ai.inference import ChatCompletionsClient - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) + ) from e if use_serverless_connection: endpoint = connection.endpoint_url @@ -158,10 +161,10 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": try: from azure.ai.inference import EmbeddingsClient - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "Azure AI Inference SDK is not installed. Please install it using 'pip install azure-ai-inference'" - ) + ) from e if use_serverless_connection: endpoint = connection.endpoint_url @@ -222,8 +225,10 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs try: from openai import AzureOpenAI - except ModuleNotFoundError as _: - raise ModuleNotFoundError("OpenAI SDK is not installed. Please install it using 'pip install openai'") + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + "OpenAI SDK is not installed. 
Please install it using 'pip install openai'" + ) from e if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( @@ -232,21 +237,18 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs client = AzureOpenAI( api_key=connection.key, azure_endpoint=connection.endpoint_url, api_version=api_version ) - elif ( - connection.authentication_type == AuthenticationType.ENTRA_ID - or connection.authentication_type == AuthenticationType.SAS - ) + elif connection.authentication_type in {AuthenticationType.ENTRA_ID, AuthenticationType.SAS}: try: from azure.identity import get_bearer_token_provider - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "azure.identity package not installed. Please install it using 'pip install azure.identity'" - ) + ) from e if connection.authentication_type == AuthenticationType.ENTRA_ID: auth = "Creating AzureOpenAI using Entra ID authentication" else: auth = "Creating AzureOpenAI using SAS authentication" - logger.debug(f"[InferenceOperations.get_azure_openai_client] {auth}") + logger.debug("[InferenceOperations.get_azure_openai_client] %s", auth) client = AzureOpenAI( # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( @@ -291,8 +293,7 @@ def get_default( return self.get( connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs ) - else: - return connection_properties_list[0] + return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace @@ -319,7 +320,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: ) if connection.properties.auth_type == AuthenticationType.ENTRA_ID: return ConnectionProperties(connection=connection, token_credential=self._config.credential) - elif connection.properties.auth_type == AuthenticationType.SAS: + if connection.properties.auth_type == AuthenticationType.SAS: from ..models._patch import SASTokenCredential cred_prop = cast(InternalConnectionPropertiesSASAuth, connection.properties) @@ -335,9 +336,8 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: return ConnectionProperties(connection=connection, token_credential=token_credential) return ConnectionProperties(connection=connection) - else: - connection = self._get_connection(connection_name=connection_name, **kwargs) - return ConnectionProperties(connection=connection) + connection = self._get_connection(connection_name=connection_name, **kwargs) + return ConnectionProperties(connection=connection) @distributed_trace def list( @@ -372,10 +372,10 @@ def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: # See: https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html#usage try: from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "OpenTelemetry OTLP exporter is not installed. 
Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" - ) + ) from e return OTLPSpanExporter(endpoint=destination) if isinstance(destination, io.TextIOWrapper): @@ -383,10 +383,10 @@ def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter try: from opentelemetry.sdk.trace.export import ConsoleSpanExporter - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) + ) from e return ConsoleSpanExporter() else: @@ -507,7 +507,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: from azure.core.settings import settings settings.tracing_implementation = "opentelemetry" - except ModuleNotFoundError as _: + except ModuleNotFoundError: logger.warning( "Azure SDK tracing plugin is not installed. Please install it using 'pip install azure-core-tracing-opentelemetry'" ) @@ -518,7 +518,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: instrumentor = AIInferenceInstrumentor() if not instrumentor.is_instrumented(): instrumentor.instrument() - except ModuleNotFoundError as _: + except ModuleNotFoundError: logger.warning( "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed" ) @@ -536,7 +536,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor # type: ignore OpenAIInstrumentor().instrument() - except ModuleNotFoundError as _: + except ModuleNotFoundError: logger.warning( "Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai-v2` is not installed" ) @@ -545,7 +545,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: from opentelemetry.instrumentation.langchain import LangchainInstrumentor # type: ignore LangchainInstrumentor().instrument() - except ModuleNotFoundError as _: + except ModuleNotFoundError: logger.warning( "Could not call LangchainInstrumentor().instrument()` since `opentelemetry-instrumentation-langchain` is not installed" ) @@ -618,6 +618,10 @@ def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> N class AgentsOperations(AgentsOperationsGenerated): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._toolset: Optional[_models.ToolSet] = None + @overload def create_agent(self, body: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.Agent: """Creates a new agent. @@ -1126,9 +1130,7 @@ def _get_toolset(self) -> Optional[_models.ToolSet]: :return: The toolset for the agent. If not set, returns None. 
:rtype: ~azure.ai.projects.models.ToolSet """ - if hasattr(self, "_toolset"): - return self._toolset - return None + return self._toolset @overload def create_run( @@ -2074,7 +2076,7 @@ def _handle_submit_tool_outputs( logger.warning("Toolset is not available in the client.") return - logger.info(f"Tool outputs: {tool_outputs}") + logger.info("Tool outputs: %s", tool_outputs) if tool_outputs: with self.submit_tool_outputs_to_stream( thread_id=run.thread_id, run_id=run.id, tool_outputs=tool_outputs, event_handler=event_handler @@ -2175,7 +2177,7 @@ def upload_file( return super().upload_file(file=file_content, purpose=purpose, **kwargs) except IOError as e: - raise IOError(f"Unable to read file: {file_path}. Reason: {str(e)}") + raise IOError(f"Unable to read file: {file_path}") from e raise ValueError("Invalid parameters for upload_file. Please provide the necessary arguments.") @@ -2683,10 +2685,10 @@ def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str else: raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}") - logger.debug(f"File '{sanitized_file_name}' saved successfully at '{target_file_path}'.") + logger.debug("File '%s' saved successfully at '%s'.", sanitized_file_name, target_file_path) except (ValueError, RuntimeError, TypeError, IOError) as e: - logger.error(f"An error occurred in save_file: {e}") + logger.error("An error occurred in save_file: %s", e) raise @overload diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 1a853c8e5817..8b7d6c8077f1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -7,24 +7,18 @@ # Licensed under the MIT License. 
# ------------------------------------ import copy -from enum import Enum import functools -import json import importlib +import json import logging import os -from azure.ai.projects import _types -from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union, cast +from enum import Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast from urllib.parse import urlparse -from azure.ai.projects.telemetry.agents._utils import * # pylint: disable=unused-wildcard-import -# pylint: disable = no-name-in-module -from azure.core import CaseInsensitiveEnumMeta # type: ignore -from azure.core.settings import settings -from azure.ai.projects.operations import AgentsOperations -from azure.ai.projects.aio.operations import AgentsOperations as AsyncAgentOperations -from azure.ai.projects.models import _models, AgentRunStream -from azure.ai.projects.models._enums import MessageRole, RunStepStatus, AgentsApiResponseFormatMode +from azure.ai.projects import _types +from azure.ai.projects.models import AgentRunStream, _models +from azure.ai.projects.models._enums import AgentsApiResponseFormatMode, MessageRole, RunStepStatus from azure.ai.projects.models._models import ( MessageAttachment, MessageDeltaChunk, @@ -33,22 +27,43 @@ RunStepDeltaChunk, RunStepFunctionToolCall, RunStepToolCallDetails, - SubmitToolOutputsAction, ThreadMessage, - ThreadMessageOptions, ThreadRun, ToolDefinition, ToolOutput, ToolResources, ) from azure.ai.projects.models._patch import AgentEventHandler, ToolSet +from azure.ai.projects.telemetry.agents._utils import ( + AZ_AI_AGENT_SYSTEM, + ERROR_TYPE, + GEN_AI_AGENT_DESCRIPTION, + GEN_AI_AGENT_ID, + GEN_AI_AGENT_NAME, + GEN_AI_EVENT_CONTENT, + GEN_AI_MESSAGE_ID, + GEN_AI_MESSAGE_STATUS, + GEN_AI_RESPONSE_MODEL, + GEN_AI_SYSTEM, + GEN_AI_SYSTEM_MESSAGE, + GEN_AI_THREAD_ID, + GEN_AI_THREAD_RUN_ID, + GEN_AI_THREAD_RUN_STATUS, + GEN_AI_USAGE_INPUT_TOKENS, + GEN_AI_USAGE_OUTPUT_TOKENS, + OperationName, + start_span, +) +from azure.core import CaseInsensitiveEnumMeta # type: ignore +from azure.core.settings import settings _Unset: Any = object() try: # pylint: disable = no-name-in-module + from opentelemetry.trace import Span, StatusCode + from azure.core.tracing import AbstractSpan, SpanKind # type: ignore - from opentelemetry.trace import StatusCode, Span _tracing_library_available = True except ModuleNotFoundError: @@ -399,7 +414,7 @@ def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: ) attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) - span.span_instance.add_event(name=f"gen_ai.assistant.message", attributes=attributes) + span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) def set_end_run(self, span: "AbstractSpan", run: ThreadRun) -> None: if span and span.span_instance.is_recording: @@ -1185,25 +1200,25 @@ def inner(*args, **kwargs): if class_function_name.startswith("AgentsOperations.create_agent"): return self.trace_create_agent(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_thread"): + if class_function_name.startswith("AgentsOperations.create_thread"): return self.trace_create_thread(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_message"): + if class_function_name.startswith("AgentsOperations.create_message"): return self.trace_create_message(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_run"): + if 
class_function_name.startswith("AgentsOperations.create_run"): return self.trace_create_run(OperationName.START_THREAD_RUN, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_and_process_run"): + if class_function_name.startswith("AgentsOperations.create_and_process_run"): return self.trace_create_run(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): + if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): return self.trace_submit_tool_outputs(False, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): + if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): return self.trace_submit_tool_outputs(True, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): + if class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): return self.trace_handle_submit_tool_outputs(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_stream"): + if class_function_name.startswith("AgentsOperations.create_stream"): return self.trace_create_stream(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.list_messages"): + if class_function_name.startswith("AgentsOperations.list_messages"): return self.trace_list_messages(function, *args, **kwargs) - elif class_function_name.startswith("AgentRunStream.__exit__"): + if class_function_name.startswith("AgentRunStream.__exit__"): return self.handle_run_stream_exit(function, *args, **kwargs) # Handle the default case (if the function name does not match) return None # Ensure all paths return @@ -1245,23 +1260,23 @@ async def inner(*args, **kwargs): if class_function_name.startswith("AgentsOperations.create_agent"): return await self.trace_create_agent_async(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_thread"): + if class_function_name.startswith("AgentsOperations.create_thread"): return await self.trace_create_thread_async(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_message"): + if class_function_name.startswith("AgentsOperations.create_message"): return await self.trace_create_message_async(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_run"): + if class_function_name.startswith("AgentsOperations.create_run"): return await self.trace_create_run_async(OperationName.START_THREAD_RUN, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_and_process_run"): + if class_function_name.startswith("AgentsOperations.create_and_process_run"): return await self.trace_create_run_async(OperationName.PROCESS_THREAD_RUN, function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): + if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_run"): return await self.trace_submit_tool_outputs_async(function, False, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): + if class_function_name.startswith("AgentsOperations.submit_tool_outputs_to_stream"): return await self.trace_submit_tool_outputs_async(function, True, *args, **kwargs) - elif 
class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): + if class_function_name.startswith("AgentsOperations._handle_submit_tool_outputs"): return await self.trace_handle_submit_tool_outputs_async(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.create_stream"): + if class_function_name.startswith("AgentsOperations.create_stream"): return await self.trace_create_stream_async(function, *args, **kwargs) - elif class_function_name.startswith("AgentsOperations.list_messages"): + if class_function_name.startswith("AgentsOperations.list_messages"): return await self.trace_list_messages_async(function, *args, **kwargs) # Handle the default case (if the function name does not match) return None # Ensure all paths return @@ -1503,7 +1518,7 @@ def on_thread_message(self, message: "ThreadMessage") -> None: if self.inner_handler: self.inner_handler.on_thread_message(message) - if message.status == "completed" or message.status == "incomplete": + if message.status in {"completed", "incomplete"}: self.last_message = message def on_thread_run(self, run: "ThreadRun") -> None: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py index 6a17ffb45f0e..92f12a77ca12 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py @@ -6,13 +6,11 @@ from enum import Enum from typing import Optional -from azure.core.tracing import SpanKind # type: ignore +from azure.core.tracing import AbstractSpan, SpanKind # type: ignore from azure.core.settings import settings # type: ignore try: - # pylint: disable = no-name-in-module - from azure.core.tracing import AbstractSpan, SpanKind # type: ignore - from opentelemetry.trace import StatusCode, Span + from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import _span_impl_type = settings.tracing_implementation() except ModuleNotFoundError: diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index d9a1436e1c08..3254a176e526 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -12,4 +12,17 @@ exclude = [ ] warn_unused_configs = true ignore_missing_imports = true -follow_imports_for_stubs = false \ No newline at end of file +follow_imports_for_stubs = false + +[tool.isort] +profile = "black" +line_length = 120 +known_first_party = ["azure"] +filter_files=true +extend_skip_glob = [ + "*/_vendor/*", + "*/_generated/*", + "*/_restclient/*", + "*/doc/*", + "*/.tox/*", +] From 4ae547e59a388385e6127185362c3f58d5c756f1 Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Mon, 11 Nov 2024 10:07:43 -0800 Subject: [PATCH 106/138] Add readme for get_message, save_file, get_file_content, tracing, Bing grounding, enterprise (#38426) * git Add readme for get_message, save_file, get_file_content, tracing * Readme for enterprise search, code interpreter, attachment * More documentation for tools * Fixed create_thread documentation * resolved comments * Resolved comments * Resolved comments from Krista --- sdk/ai/azure-ai-projects/README.md | 381 ++++++++++++++---- .../ai/projects/aio/operations/_patch.py | 1 + .../agents/sample_agents_azure_ai_search.py | 2 + .../samples/agents/sample_agents_basics.py | 6 +- ...gents_basics_with_azure_monitor_tracing.py | 9 +- .../agents/sample_agents_bing_grounding.py | 3 + 
.../agents/sample_agents_code_interpreter.py | 17 +- ...nterpreter_attachment_enterprise_search.py | 3 + .../sample_agents_enterprise_file_search.py | 2 + .../sample_agents_with_resources_in_thread.py | 1 - 10 files changed, 336 insertions(+), 89 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 3f2f99ff9450..4b25c24a575d 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -43,21 +43,29 @@ TODO ## Examples ### Agents -The following steps outline the typical sequence for interacting with agents: - - - Create a project client - - Create an agent with toolset, or tools and tool resources including: - - File Search with file upload indexed by vector stores - - Code Interpreter with file upload - - Function calls +Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with agents: + + - Create project client + - Create agent with: + - File Search + - Code interpreter + - File search with file in blob store + - Code interpreter with file in blob store + - Bing grounding + - Azure AI Search + - Function call - Create a thread with - Tool resource - Create a message with: - File search attachment - - Code interpreter attachment + - Code interpreter attachment + - File search attachment with file in blob store + - Code interpreter attachment with file in blob store - Execute Run, Run_and_Process, or Stream - - Retrieve messages - - Tear down by deleting resources + - Retrieve message + - Retrieve file + - Tear down by deleting resource + - Tracing #### Create Project Client @@ -93,53 +101,48 @@ with project_client: instructions="You are helpful assistant" ) -# For asynchronous -async with project_client: - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant" - ) - ``` In the sections below, we will only provide code snippets in synchronous functions. #### Create Agent -Now you should have your project client. From the project client, you create an agent to serve the end users. An agent should Here is an example of create an agent: +Now you should have your project client. From the project client, you create an agent to serve the end users. + +Here is an example of creating an agent: ```python - - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" - ) +agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" +) ``` -#### Create Agent with Toolset, or Tools and Tool Resources -In order to use tools, you can provide toolset. Here is an example: +To allow agents to access your resources or custom functions, you need tools. You can pass tools to `create_agent` via either `toolset` or a combination of `tools` and `tool_resources`. + +Here is an example of `toolset`: + + + +```python +functions = FunctionTool(user_functions) +code_interpreter = CodeInterpreterTool() + +toolset = ToolSet() +toolset.add(functions) +toolset.add(code_interpreter) + +agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", name="my-assistant", instructions="You are a helpful assistant", toolset=toolset +) +``` + + + +Also notice that if you use the asynchronous client, you use `AsyncToolSet` instead. Additional information related to `AsyncFunctionTool` will be discussed in the later sections. + +Here is an example to use `tools` and `tool_resources`: + + ```python @@ -157,7 +160,9 @@ agent = project_client.agents.create_agent( -#### Create Agent with File Upload in Vector Store for File Search +In the following sections, we show code snippets using either `toolset` or a combination of `tools` and `tool_resources`, but you are welcome to use the other approach. + 
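Editor's aside (not part of the patch): the `user_functions` value passed to `FunctionTool` above is never defined in this diff. In the package samples it is a set of plain Python callables whose type hints and docstrings describe the parameters exposed to the model. A minimal sketch, with `fetch_weather` as an invented placeholder:

```python
import json
from typing import Callable, Set

def fetch_weather(location: str) -> str:
    """Fetch (mock) weather for a location.

    The docstring and type hints are what FunctionTool is assumed to
    surface to the model as the function-call schema.

    :param location: The location to fetch weather for.
    :return: Weather information as a JSON string.
    """
    # A real implementation would call a weather service here.
    mock_weather_data = {"Seattle": "Sunny, 25C", "London": "Cloudy, 18C"}
    return json.dumps({"weather": mock_weather_data.get(location, "Not available")})

# Assumed shape: FunctionTool accepts a set of callables like this one.
user_functions: Set[Callable] = {fetch_weather}
```

With `toolset`, the library can invoke such callables automatically during `create_and_process_run` or streaming; with plain `tools` and `tool_resources`, your code is responsible for executing them and submitting the outputs.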
+#### Create Agent with File Search To perform file search by an agent, we first need to upload a file, create a vector store, and associate the file to the vector store. Here is an example: @@ -167,9 +172,7 @@ file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {file.id}") -vector_store = project_client.agents.create_vector_store_and_poll( - file_ids=[file.id], name="my_vectorstore" -) +vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {vector_store.id}") # Create file search tool with resources followed by creating agent @@ -186,9 +189,8 @@ agent = project_client.agents.create_agent( ) -Again, you can define `toolset` instead of passing `tools` and `tool_resources`. -#### Create Agent with File Upload for Code Interpreter +#### Create Agent with Code Interpreter Here is an example to upload a file and use it for code interpreter by an agent: @@ -214,7 +216,105 @@ agent = project_client.agents.create_agent( ) + +#### Create Agent with File Search with File in Blob Store +The sections above demonstrated uploading files only for agents to perform file search and code interpreter. In some use cases, you might want to make the files more reusable for other projects. You might consider uploading files into the blob store instead. +Here is an example: + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. +_, asset_uri = project_client.upload_file("./product_info_1.md") + +# create a vector store with no file and wait for it to be processed +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) +vector_store = project_client.agents.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") +print(f"Created vector store, vector store ID: {vector_store.id}") + +# create a file search tool +file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) + +# notice that FileSearchTool must be added as tools and tool_resources, or the assistant will be unable to search the file +agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, +) +``` + + 
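A note on the `upload_file` helper used above (editor's note, not part of the patch): per the `_patch.py` changes earlier in this series, it delegates to the optional `azure-ai-ml` package, raises `ImportError` when that package is missing, and is annotated as returning `Tuple[str, str]`. A minimal guard, assuming the second tuple element is the asset URI as in the sample:

```python
from pathlib import Path

try:
    # The second element of the returned tuple is the asset URI, which is
    # usable as a VectorStoreDataSource asset_identifier (see above).
    _, asset_uri = project_client.upload_file(Path("./product_info_1.md"))
except ImportError as e:
    # Raised when the optional azure-ai-ml dependency is not installed.
    raise SystemExit("Please install it using `pip install azure-ai-ml`") from e
```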
+#### Create Agent with Code Interpreter with File in Blob Store + +Coming soon + +#### Create Agent with Bing Grounding +To enable your agent to perform searches through the Bing search API, use `BingGroundingTool` along with a connection. + +Here is an example: + + + +```python +bing_connection = project_client.connections.get(connection_name=os.environ["BING_CONNECTION_NAME"]) +conn_id = bing_connection.id + +print(conn_id) + +# Initialize agent bing tool and add the connection id +bing = BingGroundingTool(connection_id=conn_id) + +# Create agent with the bing tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=bing.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + + +#### Create Agent with Azure AI Search +Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. It is ideal for knowledge base insights, information discovery, and automation. + +Here is an example to integrate Azure AI Search: + + + +```python +conn_list = project_client.connections.list() +conn_id = "" +for conn in conn_list: + if conn.connection_type == "CognitiveSearch": + conn_id = conn.id + break + +print(conn_id) + +# Initialize agent AI search tool and add the search index connection id +ai_search = AzureAISearchTool() +ai_search.add_index(conn_id, "sample_index") + +# Create agent with AI search tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are a helpful assistant", + tools=ai_search.definitions, + headers={"x-ms-enable-preview": "true"}, + ) +``` + + 
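As an aside (not part of the patch): rather than filtering `connections.list()` by hand as above, the `get_default` helper shown in the `operations/_patch.py` diff earlier in this commit can fetch the first connection of a given type, raising `ResourceNotFoundError` when none exists. A sketch, assuming `ConnectionType` exposes an Azure AI Search member:

```python
from azure.ai.projects.models import ConnectionType
from azure.core.exceptions import ResourceNotFoundError

try:
    # Returns the first connection of the requested type in the project.
    conn = project_client.connections.get_default(
        connection_type=ConnectionType.AZURE_AI_SEARCH,  # assumed enum member name
        with_credentials=False,
    )
    print(conn.id)
except ResourceNotFoundError:
    print("No Azure AI Search connection found in this project.")
```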
+#### Create Agent with Function Call +You can enhance your agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: @@ -266,8 +366,7 @@ For each session or conversation, a thread is required. Here is an example: ```python - - thread = project_client.agents.create_thread() +thread = project_client.agents.create_thread() ``` @@ -283,9 +382,7 @@ In some scenarios, you might need to assign specific resources to individual thr file = project_client.agents.upload_file_and_poll(file_path="product_info_1.md", purpose="assistants") print(f"Uploaded file, file ID: {file.id}") -vector_store = project_client.agents.create_vector_store_and_poll( - file_ids=[file.id], name="my_vectorstore" -) +vector_store = project_client.agents.create_vector_store_and_poll(file_ids=[file.id], name="my_vectorstore") print(f"Created vector store, vector store ID: {vector_store.id}") # Create file search tool with resources followed by creating agent @@ -297,6 +394,12 @@ agent = project_client.agents.create_agent( instructions="Hello, you are helpful assistant and can search information from uploaded files", tools=file_search.definitions, ) + +print(f"Created agent, ID: {agent.id}") + +# Create thread with file resources. +# If the agent has multiple threads, only this thread can search this file. +thread = project_client.agents.create_thread(tool_resources=file_search.resources) ``` @@ -326,7 +429,7 @@ message = project_client.agents.create_message( -#### Create Message with Code Interpreter File Attachment +#### Create Message with Code Interpreter Attachment To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in the `create_agent` call, or the file attachment cannot be opened by the code interpreter. Here is an example to pass `CodeInterpreterTool` as tool: ```python -# notice that CodeInterpreter must be enabled in the agent creation, +# notice that CodeInterpreter must be enabled in the agent creation, # otherwise the agent will not be able to see the file attachment for code interpretation agent = project_client.agents.create_agent( @@ -355,12 +458,38 @@ message = project_client.agents.create_message( thread_id=thread.id, role="user", content="Could you please create bar chart in TRANSPORTATION sector for the operating profit from the uploaded csv file and provide file to me?", - attachments=[attachment] + attachments=[attachment], ) ``` +#### Create Message with File Search Attachment with File in Blob Store + +Coming Soon + +#### Create Message with Code Interpreter Attachment with File in Blob Store +Alternatively, you can upload a file to the blob store and attach it to the message as a file attachment. Here is an example: + + + +```python +# We will upload the local file to Azure and will use it for vector store creation. +_, asset_uri = project_client.upload_file("./product_info_1.md") +ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) + +# create a message with the attachment +attachment = MessageAttachment(data_sources=[ds], tools=code_interpreter.definitions) +message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] +) ``` + + + #### Create Run, Run_and_Process, or Stream To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. 
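Editor's sketch (the README hunk above elides the non-streaming examples from the diff context): `create_run` returns immediately, so callers poll until the run reaches a terminal state, which is roughly what `create_and_process_run` automates, together with executing `toolset` functions on `requires_action`. A minimal loop, assuming the generated `get_run` operation used by the package samples:

```python
import time

run = project_client.agents.create_run(thread_id=thread.id, assistant_id=agent.id)

# Poll until the run leaves its non-terminal states; create_and_process_run
# wraps a loop like this and also submits tool outputs on requires_action.
while run.status in ("queued", "in_progress", "requires_action"):
    time.sleep(1)
    run = project_client.agents.get_run(thread_id=thread.id, run_id=run.id)

print(f"Run finished with status: {run.status}")
```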
@@ -402,11 +531,10 @@ Here is an example: ```python - - with project_client.agents.create_stream( - thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() - ) as stream: - stream.until_done() +with project_client.agents.create_stream( + thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler() +) as stream: + stream.until_done() ``` @@ -416,38 +544,36 @@ The event handler is optional. Here is an example: ```python class MyEventHandler(AgentEventHandler): - def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - for content_part in delta.delta.content: - if isinstance(content_part, MessageDeltaTextContent): - text_value = content_part.text.value if content_part.text else "No text" - print(f"Text delta received: {text_value}") + def on_message_delta(self, delta: "MessageDeltaChunk") -> None: + for content_part in delta.delta.content: + if isinstance(content_part, MessageDeltaTextContent): + text_value = content_part.text.value if content_part.text else "No text" + print(f"Text delta received: {text_value}") - def on_thread_message(self, message: "ThreadMessage") -> None: - print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") + def on_thread_message(self, message: "ThreadMessage") -> None: + print(f"ThreadMessage created. ID: {message.id}, Status: {message.status}") - def on_thread_run(self, run: "ThreadRun") -> None: - print(f"ThreadRun status: {run.status}") + def on_thread_run(self, run: "ThreadRun") -> None: + print(f"ThreadRun status: {run.status}") - def on_run_step(self, step: "RunStep") -> None: - print(f"RunStep type: {step.type}, Status: {step.status}") + def on_run_step(self, step: "RunStep") -> None: + print(f"RunStep type: {step.type}, Status: {step.status}") - def on_error(self, data: str) -> None: - print(f"An error occurred. Data: {data}") + def on_error(self, data: str) -> None: + print(f"An error occurred. Data: {data}") - def on_done(self) -> None: - print("Stream completed.") + def on_done(self) -> None: + print("Stream completed.") - def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - print(f"Unhandled Event Type: {event_type}, Data: {event_data}") + def on_unhandled_event(self, event_type: str, event_data: Any) -> None: + print(f"Unhandled Event Type: {event_type}, Data: {event_data}") ``` -#### Retrieve Messages +#### Retrieve Message To retrieve messages from agents, use the following example: ```python messages = project_client.agents.list_messages(thread_id=thread.id) +last_message_content = messages.data[-1].content[-1].text.value +print(f"Last message content: {last_message_content}") ``` +Depending on the use case, if you expect the agents to return only text messages, `list_messages` should be sufficient. + +If you are using tools, consider using the `get_messages` function instead. This function classifies the message content and returns properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations`. 
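A short illustrative sketch of the classified accessors named above (editor's addition; the property names come from the paragraph, while the exact element shapes are assumptions):

```python
messages = project_client.agents.get_messages(thread_id=thread.id)

# text_messages is assumed to collect the MessageTextContent parts of the
# thread, so the last entry should be the most recent textual reply.
if messages.text_messages:
    print(messages.text_messages[-1].text.value)

# Citations produced by file search, if any were generated.
for citation in messages.file_citation_annotations:
    print(citation.text)
```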
+
+Here is an example of retrieving file IDs from messages and saving the files to the local drive:
+
+
+
+```python
+messages = project_client.agents.get_messages(thread_id=thread.id)
+print(f"Messages: {messages}")
+
+for image_content in messages.image_contents:
+    file_id = image_content.image_file.file_id
+    print(f"Image File ID: {file_id}")
+    file_name = f"{file_id}_image_file.png"
+    project_client.agents.save_file(file_id=file_id, file_name=file_name)
+    print(f"Saved image file to: {Path.cwd() / file_name}")
+
+for file_path_annotation in messages.file_path_annotations:
+    print(f"File Paths:")
+    print(f"Type: {file_path_annotation.type}")
+    print(f"Text: {file_path_annotation.text}")
+    print(f"File ID: {file_path_annotation.file_path.file_id}")
+    print(f"Start Index: {file_path_annotation.start_index}")
+    print(f"End Index: {file_path_annotation.end_index}")
+```
+
+
+
+Here is an example using `get_file_content`:
+
+```python
+from pathlib import Path
+from typing import Optional, Union
+
+async def save_file_content(client, file_id: str, file_name: str, target_dir: Optional[Union[str, Path]] = None):
+    # Determine the target directory
+    path = Path(target_dir).expanduser().resolve() if target_dir else Path.cwd()
+    path.mkdir(parents=True, exist_ok=True)
+
+    # Retrieve the file content
+    file_content_stream = await client.get_file_content(file_id)
+    if not file_content_stream:
+        raise RuntimeError(f"No content retrievable for file ID '{file_id}'.")
+
+    # Collect all chunks asynchronously
+    chunks = []
+    async for chunk in file_content_stream:
+        if isinstance(chunk, (bytes, bytearray)):
+            chunks.append(chunk)
+        else:
+            raise TypeError(f"Expected bytes or bytearray, got {type(chunk).__name__}")
+
+    target_file_path = path / file_name
+
+    # Write the collected content to the file synchronously
+    with open(target_file_path, "wb") as file:
+        for chunk in chunks:
+            file.write(chunk)
+```
+
 #### Teardown
 
 To remove resources after completing tasks, use the following functions:
@@ -478,6 +670,43 @@ project_client.agents.delete_agent(agent.id)
 print("Deleted agent")
```
 
+
+
+#### Tracing
+
+As part of an Azure AI project, you can use its connection string to observe the full execution path through Azure Monitor. Typically, you want to start tracing before you create an agent.
+
+Here is a code snippet to be included above `create_agent`:
+
+
+
+```python
+from opentelemetry import trace
+from azure.monitor.opentelemetry import configure_azure_monitor
+
+# Enable Azure Monitor tracing
+application_insights_connection_string = project_client.telemetry.get_connection_string()
+if not application_insights_connection_string:
+    print("Application Insights was not enabled for this project.")
+    print("Enable it via the 'Tracing' tab in your AI Studio project page.")
+    exit()
+configure_azure_monitor(connection_string=application_insights_connection_string)
+
+scenario = os.path.basename(__file__)
+tracer = trace.get_tracer(__name__)
+
+with tracer.start_as_current_span(scenario):
+    with project_client:
+```
+
+
+
+In addition, you might find it helpful to see the tracing logs in the console.
You can achieve this with the following code:
+
+```python
+project_client.telemetry.enable(destination=sys.stdout)
+```
+
 ## Troubleshooting
 
 ### Exceptions
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py
index 47543c19d484..927431e8c3d7 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py
@@ -1220,6 +1220,7 @@ async def create_run(
 async def create_and_process_run(
     self,
     thread_id: str,
+    *,
     assistant_id: str,
     model: Optional[str] = None,
     instructions: Optional[str] = None,
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py
index 01949969e038..f23271b0fbb6 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py
@@ -36,6 +36,7 @@
     conn_str=os.environ["PROJECT_CONNECTION_STRING"],
 )
 
+# [START create_agent_with_azure_ai_search_tool]
 conn_list = project_client.connections.list()
 conn_id = ""
 for conn in conn_list:
@@ -58,6 +59,7 @@
     tools=ai_search.definitions,
     headers={"x-ms-enable-preview": "true"},
 )
+# [END create_agent_with_azure_ai_search_tool]
 print(f"Created agent, ID: {agent.id}")
 
 # Create thread for communication
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py
index 5a7cbb265abc..c7ce63b52d7b 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py
@@ -71,6 +71,8 @@
 
     # [START list_messages]
     messages = project_client.agents.list_messages(thread_id=thread.id)
-    # [END list_messages]
+    last_message_content = messages.data[-1].content[-1].text.value
+    print(f"Last message content: {last_message_content}")
 
-    print(f"messages: {messages}")
+    # [END list_messages]
+    print(f"Messages: {messages}")
 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py
index fda3b45f3dc1..88d8229b8486 100644
--- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py
+++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics_with_azure_monitor_tracing.py
@@ -27,8 +27,6 @@
 import os, time
 from azure.ai.projects import AIProjectClient
 from azure.identity import DefaultAzureCredential
-from opentelemetry import trace
-from azure.monitor.opentelemetry import configure_azure_monitor
 
 # Create an AI Project Client from a connection string, copied from your AI Studio project.
# At the moment, it should be in the format ";;;" @@ -39,8 +37,11 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -# Enable Azure Monitor tracing +# [START enable_tracing] +from opentelemetry import trace +from azure.monitor.opentelemetry import configure_azure_monitor +# Enable Azure Monitor tracing application_insights_connection_string = project_client.telemetry.get_connection_string() if not application_insights_connection_string: print("Application Insights was not enabled for this project.") @@ -53,6 +54,8 @@ with tracer.start_as_current_span(scenario): with project_client: + + # [END enable_tracing] agent = project_client.agents.create_agent( model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" ) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py index d1e25464841a..a63364218c5f 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py @@ -36,6 +36,7 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) +# [START create_agent_with_bing_grounding_tool] bing_connection = project_client.connections.get(connection_name=os.environ["BING_CONNECTION_NAME"]) conn_id = bing_connection.id @@ -53,6 +54,8 @@ tools=bing.definitions, headers={"x-ms-enable-preview": "true"}, ) + # [END create_agent_with_bing_grounding_tool] + print(f"Created agent, ID: {agent.id}") # Create thread for communication diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py index c506099fb75c..e37851512dc1 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter.py @@ -79,17 +79,15 @@ project_client.agents.delete_file(file.id) print("Deleted file") + # [START get_messages_and_save_files] messages = project_client.agents.get_messages(thread_id=thread.id) print(f"Messages: {messages}") - last_msg = messages.get_last_text_message_by_sender("assistant") - if last_msg: - print(f"Last Message: {last_msg.text.value}") - for image_content in messages.image_contents: - print(f"Image File ID: {image_content.image_file.file_id}") - file_name = f"{image_content.image_file.file_id}_image_file.png" - project_client.agents.save_file(file_id=image_content.image_file.file_id, file_name=file_name) + file_id = image_content.image_file.file_id + print(f"Image File ID: {file_id}") + file_name = f"{file_id}_image_file.png" + project_client.agents.save_file(file_id=file_id, file_name=file_name) print(f"Saved image file to: {Path.cwd() / file_name}") for file_path_annotation in messages.file_path_annotations: @@ -99,6 +97,11 @@ print(f"File ID: {file_path_annotation.file_path.file_id}") print(f"Start Index: {file_path_annotation.start_index}") print(f"End Index: {file_path_annotation.end_index}") + # [END get_messages_and_save_files] + + last_msg = messages.get_last_text_message_by_sender("assistant") + if last_msg: + print(f"Last Message: {last_msg.text.value}") project_client.agents.delete_agent(agent.id) print("Deleted agent") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py index 922bdff36b30..7c810737a3a7 
100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py @@ -56,6 +56,7 @@ thread = project_client.agents.create_thread() print(f"Created thread, thread ID: {thread.id}") + # [START upload_file_and_create_message_with_code_interpreter] # We will upload the local file to Azure and will use it for vector store creation. _, asset_uri = project_client.upload_file("./product_info_1.md") ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) @@ -65,6 +66,8 @@ message = project_client.agents.create_message( thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] ) + # [END upload_file_and_create_message_with_code_interpreter] + print(f"Created message, message ID: {message.id}") run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py index 36ec4c6f4f34..2cce6f7e6939 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py @@ -36,6 +36,7 @@ with project_client: + # [START upload_file_and_create_agent_with_file_search] # We will upload the local file to Azure and will use it for vector store creation. _, asset_uri = project_client.upload_file("./product_info_1.md") @@ -55,6 +56,7 @@ tools=file_search_tool.definitions, tool_resources=file_search_tool.resources, ) + # [END upload_file_and_create_agent_with_file_search] print(f"Created agent, agent ID: {agent.id}") thread = project_client.agents.create_thread() diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py index 3cbaceab89a0..abc6acf0d62a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_with_resources_in_thread.py @@ -54,7 +54,6 @@ instructions="Hello, you are helpful assistant and can search information from uploaded files", tools=file_search.definitions, ) - # [END upload_file_create_vector_store_and_agent_with_file_search_tool] print(f"Created agent, ID: {agent.id}") From 7a1de64985800fa8d64273757f13b869c1189d16 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:19:02 -0800 Subject: [PATCH 107/138] More fixes to mypy (#38446) * Fix unit tests and some type issues. 
* Fixes * Fix mypy --- .../azure/ai/projects/operations/_patch.py | 2 +- sdk/ai/azure-ai-projects/pyproject.toml | 18 +- ...tore_batch_enterprise_file_search_async.py | 49 ++- .../samples/agents/sample_agents_basics.py | 18 +- .../async_samples/sample_evaluations_async.py | 21 +- .../sample_evaluations_schedules.py | 14 +- .../tests/agents/test_agents_client.py | 346 +++++++++++++----- .../tests/agents/test_agents_client_async.py | 309 +++++++++++----- 8 files changed, 577 insertions(+), 200 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 144be9b900b0..e272cf0328de 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -418,7 +418,7 @@ def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter try: from opentelemetry.sdk._logs.export import ConsoleLogExporter - except ModuleNotFoundError as _: + except ModuleNotFoundError as ex: # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. logger.warning( diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index 3254a176e526..de7bf5a6932b 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -1,14 +1,18 @@ [tool.mypy] -python_version = "3.8" +python_version = "3.10" exclude = [ "downloaded", - # Types contains code, generated by typespec. - "_types.py", + # In run_mypy.py python version is hardcoded to 3.8. It does not allow + # obligatory named parameters as fun(a, *, b=1, c=2). + "sample_agents_vector_store_batch_enterprise_file_search_async\\.py", # Error in typing caused by the typespec. 
- "sample_agents_with_file_search_attachment.py", - "sample_agents_with_code_interpreter_file_attachment.py", - "sample_agents_code_interpreter_attachment_enterprise_search.py", - "sample_agents_with_file_search_attachment_async.py" + "sample_agents_with_file_search_attachment\\.py", + "sample_agents_with_code_interpreter_file_attachment\\.py", + "sample_agents_code_interpreter_attachment_enterprise_search\\.py", + "sample_agents_with_file_search_attachment_async\\.py", + "sample_agents_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_agents_code_interpreter_attachment_enterprise_search_async\\.py", + "sample_agents_code_interpreter_attachment_async\\.py" ] warn_unused_configs = true ignore_missing_imports = true diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index 273f640bcdd6..f0b655278f0a 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -23,7 +23,11 @@ import os from azure.ai.projects.aio import AIProjectClient -from azure.ai.projects.models import FileSearchTool, VectorStoreDataSource, VectorStoreDataSourceAssetType +from azure.ai.projects.models import ( + FileSearchTool, + VectorStoreDataSource, + VectorStoreDataSourceAssetType, +) from azure.identity.aio import DefaultAzureCredential @@ -41,15 +45,24 @@ async def main(): # We will upload the local file to Azure and will use it for vector store creation. _, asset_uri = project_client.upload_file("../product_info_1.md") - ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + ds = VectorStoreDataSource( + asset_identifier=asset_uri, + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + vector_store = await project_client.agents.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) print(f"Created vector store, vector store ID: {vector_store.id}") # add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=[ds] + vector_store_file_batch = ( + await project_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] + ) + ) + print( + f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}" ) - print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") # create a file search tool file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) @@ -68,18 +81,26 @@ async def main(): print(f"Created thread, thread ID: {thread.id}") message = await project_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" 
+ thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", ) print(f"Created message, message ID: {message.id}") - run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await project_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) print(f"Created run, run ID: {run.id}") await file_search_tool.remove_vector_store(vector_store.id) - print(f"Removed vector store from file search, vector store ID: {vector_store.id}") + print( + f"Removed vector store from file search, vector store ID: {vector_store.id}" + ) await project_client.agents.update_agent( - assistant_id=agent.id, tools=file_search_tool.definitions, tool_resources=file_search_tool.resources + assistant_id=agent.id, + tools=file_search_tool.definitions, + tool_resources=file_search_tool.resources, ) print(f"Updated agent, agent ID: {agent.id}") @@ -87,11 +108,15 @@ async def main(): print(f"Created thread, thread ID: {thread.id}") message = await project_client.agents.create_message( - thread_id=thread.id, role="user", content="What feature does Smart Eyewear offer?" + thread_id=thread.id, + role="user", + content="What feature does Smart Eyewear offer?", ) print(f"Created message, message ID: {message.id}") - run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await project_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) print(f"Created run, run ID: {run.id}") await project_client.agents.delete_vector_store(vector_store.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py index c7ce63b52d7b..5bc6b98a221b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -24,6 +24,7 @@ import os, time from azure.ai.projects import AIProjectClient from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import MessageTextContent # Create an Azure AI Client from a connection string, copied from your AI Studio project. # At the moment, it should be in the format ";;;" @@ -40,7 +41,9 @@ # [START create_agent] agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant" + model="gpt-4-1106-preview", + name="my-assistant", + instructions="You are helpful assistant", ) # [END create_agent] print(f"Created agent, agent ID: {agent.id}") @@ -51,7 +54,9 @@ print(f"Created thread, thread ID: {thread.id}") # [START create_message] - message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = project_client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) # [END create_message] print(f"Created message, message ID: {message.id}") @@ -71,8 +76,13 @@ # [START list_messages] messages = project_client.agents.list_messages(thread_id=thread.id) - last_message_content = messages.data[-1].content[-1].text.value - print(f"Last message content: {last_message_content}") + + # The messages are following in the reverse order, + # we will iterate them and output only text contents. 
+ for data_point in reversed(messages.data): + last_message_content = data_point.content[-1] + if isinstance(last_message_content, MessageTextContent): + print(f"{data_point.role}: {last_message_content.text.value}") # [END list_messages] print(f"Messages: {messages}") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index 0b72c35f469b..09c251e93429 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -25,7 +25,12 @@ from azure.ai.projects.aio import AIProjectClient from azure.identity.aio import DefaultAzureCredential -from azure.ai.projects.models import Evaluation, Dataset, EvaluatorConfiguration, ConnectionType +from azure.ai.projects.models import ( + Evaluation, + Dataset, + EvaluatorConfiguration, + ConnectionType, +) from azure.ai.evaluation import F1ScoreEvaluator, RelevanceEvaluator, ViolenceEvaluator @@ -38,7 +43,9 @@ async def main(): # Upload data for evaluation data_id, _ = project_client.upload_file("./data/evaluate_test_data.jsonl") - default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) + default_connection = await project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI + ) deployment_name = "<>" api_version = "<>" @@ -75,12 +82,18 @@ async def main(): evaluation_response = await project_client.evaluations.create(evaluation) # Get evaluation - get_evaluation_response = await project_client.evaluations.get(evaluation_response.id) + get_evaluation_response = await project_client.evaluations.get( + evaluation_response.id + ) print("----------------------------------------------------------------") print("Created evaluation, evaluation ID: ", get_evaluation_response.id) print("Evaluation status: ", get_evaluation_response.status) - print("AI Studio URI: ", get_evaluation_response.properties["AiStudioEvaluationUri"]) + if isinstance(get_evaluation_response.properties, dict): + print( + "AI Studio URI: ", + get_evaluation_response.properties["AiStudioEvaluationUri"], + ) print("----------------------------------------------------------------") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index d4db1e281bfd..a9e1b18da935 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -36,7 +36,12 @@ def main(): f1_evaluator_config = EvaluatorConfiguration( id="azureml://registries/model-evaluation-dev-01/models/F1ScoreEval/versions/1", - init_params={"column_mapping": {"response": "${data.message}", "ground_truth": "${data.itemType}"}}, + init_params={ + "column_mapping": { + "response": "${data.message}", + "ground_truth": "${data.itemType}", + } + }, ) recurrence_trigger = RecurrenceTrigger(frequency="daily", interval=1) @@ -54,11 +59,12 @@ def main(): trigger=recurrence_trigger, description=description, tags=tags, - properties=properties, ) - evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule) - print(evaluation_schedule.provisioning_status) + evaluation_schedule = ai_client.evaluations.create_or_replace_schedule( + name, 
evaluation_schedule + ) + print(evaluation_schedule.provisioning_state) print(evaluation_schedule) # Sample for get an evaluation schedule with name diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 83cc23838f51..38830e92068b 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -17,7 +17,11 @@ import functools from azure.ai.projects import AIProjectClient -from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy +from devtools_testutils import ( + AzureRecordedTestCase, + EnvironmentVariableLoader, + recorded_by_proxy, +) from azure.ai.projects.models import ( CodeInterpreterTool, CodeInterpreterToolResource, @@ -128,7 +132,9 @@ def create_client(self, **kwargs): def _get_data_file(self) -> str: """Return the test file name.""" - return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + return os.path.join( + os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md" + ) # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ @@ -183,12 +189,16 @@ def test_create_update_delete_agent(self, **kwargs): print("Created client") # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) # update agent - agent = client.agents.update_agent(agent.id, name="my-agent2", instructions="You are helpful agent") + agent = client.agents.update_agent( + agent.id, name="my-agent2", instructions="You are helpful agent" + ) assert agent.name == "my-agent2" # delete agent and close client @@ -209,13 +219,21 @@ def test_create_agent_with_tools(self, **kwargs): # create agent with tools agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + tools=functions.definitions, ) assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + assert ( + agent.tools[0]["function"]["name"] + == functions.definitions[0]["function"]["name"] + ) + print( + "Tool successfully submitted:", functions.definitions[0]["function"]["name"] + ) # delete agent and close client client.agents.delete_agent(agent.id) @@ -235,13 +253,21 @@ def test_create_agent_with_tools_and_resources(self, **kwargs): # create agent with tools agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + tools=functions.definitions, ) assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + assert ( + agent.tools[0]["function"]["name"] + == 
functions.definitions[0]["function"]["name"] + ) + print( + "Tool successfully submitted:", functions.definitions[0]["function"]["name"] + ) # delete agent and close client client.agents.delete_agent(agent.id) @@ -256,7 +282,9 @@ def test_update_agent(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id # update agent and confirm changes went through @@ -316,7 +344,9 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -340,7 +370,9 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -439,7 +471,9 @@ def test_create_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -449,7 +483,9 @@ def test_create_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -467,7 +503,9 @@ def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -477,13 +515,19 @@ def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) - message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + message2 = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) assert message2.id print("Created message, message ID", message2.id) - message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + message3 = client.agents.create_message( + thread_id=thread.id, 
role="user", content="Hello, tell me a third joke" + ) assert message3.id print("Created message, message ID", message3.id) @@ -501,7 +545,9 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -516,21 +562,29 @@ def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message1 = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message1.id print("Created message, message ID", message1.id) messages1 = client.agents.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") + message2 = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me another joke" + ) assert message2.id print("Created message, message ID", message2.id) messages2 = client.agents.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 - assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + assert ( + messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + ) - message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") + message3 = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a third joke" + ) assert message3.id print("Created message, message ID", message3.id) messages3 = client.agents.list_messages(thread_id=thread.id) @@ -555,7 +609,9 @@ def test_get_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -565,7 +621,9 @@ def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -630,7 +688,9 @@ def test_create_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -658,7 +718,9 @@ def test_get_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", 
instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -693,7 +755,9 @@ def test_run_status(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -703,7 +767,9 @@ def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -837,7 +903,10 @@ def test_submit_tool_outputs_to_run(self, **kwargs): # create agent agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + toolset=toolset, ) assert agent.id print("Created agent, agent ID", agent.id) @@ -848,7 +917,9 @@ def test_submit_tool_outputs_to_run(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" + ) assert message.id print("Created message, message ID", message.id) @@ -859,8 +930,13 @@ def test_submit_tool_outputs_to_run(self, **kwargs): # check that tools are uploaded assert run.tools - assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + assert ( + run.tools[0]["function"]["name"] + == functions.definitions[0]["function"]["name"] + ) + print( + "Tool successfully submitted:", functions.definitions[0]["function"]["name"] + ) # check status assert run.status in [ @@ -878,7 +954,10 @@ def test_submit_tool_outputs_to_run(self, **kwargs): run = client.agents.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if ( + run.status == "requires_action" + and run.required_action.submit_tool_outputs + ): print("Requires action: submit tool outputs") tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -889,7 +968,9 @@ def test_submit_tool_outputs_to_run(self, **kwargs): break # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here + tool_outputs = toolset.execute_tool_calls( + tool_calls + ) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: client.agents.submit_tool_outputs_to_run( @@ -971,7 +1052,9 @@ def test_create_thread_and_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -1023,7 +1106,9 @@ def test_list_run_step(self, **kwargs): 
assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -1033,7 +1118,9 @@ def test_list_run_step(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") + message = client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, what time is it?" + ) assert message.id print("Created message, message ID", message.id) @@ -1052,7 +1139,12 @@ def test_list_run_step(self, **kwargs): # wait for a second time.sleep(1) run = client.agents.get_run(thread_id=thread.id, run_id=run.id) - assert run.status in ["queued", "in_progress", "requires_action", "completed"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] print("Run status:", run.status) steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 # TODO what else should we look at? @@ -1075,7 +1167,9 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -1111,14 +1205,21 @@ def test_get_run_step(self, **kwargs): assert run.last_error print(run.last_error) print("FAILED HERE") - assert run.status in ["queued", "in_progress", "requires_action", "completed"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] print("Run status:", run.status) # list steps, check that get_run_step works with first step_id steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = client.agents.get_run_step( + thread_id=thread.id, run_id=run.id, step_id=step.id + ) assert step == get_step # delete agent and close client @@ -1207,7 +1308,7 @@ def _do_test_create_vector_store(self, **kwargs): else: ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -1227,14 +1328,15 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ VectorStoreConfigurations( - store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), ) ] ) @@ -1248,7 +1350,9 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert agent.id - thread = ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = ai_client.agents.create_thread( + 
tool_resources=ToolResources(file_search=fs) + ) assert thread.id # create message message = ai_client.agents.create_message( @@ -1256,7 +1360,9 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1267,10 +1373,12 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): @recorded_by_proxy def test_create_vector_store_add_file_file_id(self, **kwargs): """Test adding single file to vector store withn file ID.""" - self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs) + self._do_test_create_vector_store_add_file( + file_path=self._get_data_file(), **kwargs + ) @agentClientPreparer() - @pytest.mark.skip("The CreateVectorStoreFile API is not supported yet.") + # @pytest.markp("The CreateVectorStoreFile API is not supported yet.") @recorded_by_proxy def test_create_vector_store_add_file_azure(self, **kwargs): """Test adding single file to vector store with azure asset ID.""" @@ -1286,8 +1394,15 @@ def _do_test_create_vector_store_add_file(self, **kwargs): if file_id: ds = None else: - ds = [VectorStoreDataSource(storage_uri=kwargs["azure_ai_projects_data_path"], asset_type="uri_asset")] - vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + ds = [ + VectorStoreDataSource( + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type="uri_asset", + ) + ] + vector_store = ai_client.agents.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) assert vector_store.id vector_store_file = ai_client.agents.create_vector_store_file( vector_store_id=vector_store.id, data_sources=ds, file_id=file_id @@ -1299,10 +1414,12 @@ def _do_test_create_vector_store_add_file(self, **kwargs): @recorded_by_proxy def test_create_vector_store_batch_file_ids(self, **kwargs): """Test adding multiple files to vector store with file IDs.""" - self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs) + self._do_test_create_vector_store_batch( + file_path=self._get_data_file(), **kwargs + ) @agentClientPreparer() - @pytest.mark.skip("The CreateFileBatch API is not supported yet.") + # @pytest.markp("The CreateFileBatch API is not supported yet.") @recorded_by_proxy def test_create_vector_store_batch_azure(self, **kwargs): """Test adding multiple files to vector store with azure asset IDs.""" @@ -1322,14 +1439,18 @@ def _do_test_create_vector_store_batch(self, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = ai_client.agents.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) assert vector_store.id - vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + vector_store_file_batch = ( + ai_client.agents.create_vector_store_file_batch_and_poll( + 
vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) ) assert vector_store_file_batch.id self._test_file_search(ai_client, vector_store, file_id) @@ -1360,7 +1481,9 @@ def _test_file_search( ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) ai_client.agents.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) @@ -1370,12 +1493,13 @@ def _test_file_search( ai_client.close() @agentClientPreparer() - @pytest.mark.skip("The CreateFileBatch API is not supported yet.") + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -1408,14 +1532,22 @@ def _do_test_message_attachment(self, **kwargs): attachment = MessageAttachment( file_id=file_id, data_sources=kwargs.get("data_sources"), - tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]], + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], ) message = ai_client.agents.create_message( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) ai_client.agents.delete_agent(agent.id) @@ -1424,11 +1556,13 @@ def _do_test_message_attachment(self, **kwargs): assert len(messages), "No messages were created" @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @@ -1436,7 +1570,9 @@ def test_create_assistant_with_interpreter_azure(self, **kwargs): @recorded_by_proxy def test_create_assistant_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + self._do_test_create_assistant_with_interpreter( + file_path=self._get_data_file(), **kwargs + ) def _do_test_create_assistant_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1447,12 +1583,15 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) + file = ai_client.agents.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS + ) assert file.id, "The file was not uploaded." file_id = file.id cdr = CodeInterpreterToolResource( - file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment @@ -1473,19 +1612,25 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" ai_client.agents.delete_agent(agent.id) - assert len(ai_client.agents.list_messages(thread_id=thread.id)), "No messages were created" + assert len( + ai_client.agents.list_messages(thread_id=thread.id) + ), "No messages were created" @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -1493,7 +1638,9 @@ def test_create_thread_with_interpreter_azure(self, **kwargs): @recorded_by_proxy def test_create_thread_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + self._do_test_create_thread_with_interpreter( + file_path=self._get_data_file(), **kwargs + ) def _do_test_create_thread_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1504,12 +1651,15 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) + file = ai_client.agents.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS + ) assert file.id, "The file was not uploaded." file_id = file.id cdr = CodeInterpreterToolResource( - file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment @@ -1529,7 +1679,9 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1547,14 +1699,15 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ VectorStoreConfigurations( - store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), ) ] ) @@ -1576,7 +1729,9 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert message.id, "The message was not created." 
- run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1584,11 +1739,13 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ai_client.close() @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @@ -1596,7 +1753,9 @@ def test_create_attachment_in_thread_azure(self, **kwargs): @recorded_by_proxy def test_create_attachment_in_thread_file_ids(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) + self._do_test_create_attachment_in_thread_azure( + file_path=self._get_data_file(), **kwargs + ) def _do_test_create_attachment_in_thread_azure(self, **kwargs): # create client @@ -1618,13 +1777,22 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): attachment = MessageAttachment( file_id=file_id, data_sources=kwargs.get("data_sources"), - tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]], + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ThreadMessageOptions( + role="user", + content="What does the attachment say?", + attachments=[attachment], ) - message = ThreadMessageOptions(role="user", content="What does the attachment say?", attachments=[attachment]) thread = ai_client.agents.create_thread(messages=[message]) assert thread.id - run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1634,7 +1802,9 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) + file = ai_client.agents.upload_file_and_poll( + file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS + ) assert file.id, "The file was not uploaded." 
return file.id return None @@ -1687,7 +1857,9 @@ def test_code_interpreter_and_save_file(self, **kwargs): print(f"Created message, message ID: {message.id}") # create run - run = client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) print(f"Run finished with status: {run.status}") # delete file @@ -1706,7 +1878,9 @@ def test_code_interpreter_and_save_file(self, **kwargs): file_id = file_path_annotation.file_path.file_id print(f"Image File ID: {file_path_annotation.file_path.file_id}") temp_file_path = os.path.join(temp_dir, "output.png") - client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) + client.agents.save_file( + file_id=file_id, file_name="output.png", target_dir=temp_dir + ) output_file_exist = os.path.exists(temp_file_path) assert output_file_exist diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index a331928a03e0..693aca2dbb6d 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -103,8 +103,8 @@ def fetch_current_datetime_recordings(): # Statically defined user functions for fast reference -user_functions_recording = {"fetch_current_datetime": fetch_current_datetime_recordings} -user_functions_live = {"fetch_current_datetime": fetch_current_datetime_live} +user_functions_recording = {fetch_current_datetime_recordings} +user_functions_live = {fetch_current_datetime_live} # The test class name needs to start with "Test" to get collected by pytest @@ -126,7 +126,9 @@ def create_client(self, **kwargs): def _get_data_file(self) -> str: """Return the test file name.""" - return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") + return os.path.join( + os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md" + ) # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ @@ -181,7 +183,9 @@ async def test_create_delete_agent(self, **kwargs): print("Created client") # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -203,13 +207,21 @@ async def test_create_agent_with_tools(self, **kwargs): # create agent with tools agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", tools=functions.definitions + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + tools=functions.definitions, ) assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + assert ( + agent.tools[0]["function"]["name"] + == functions.definitions[0]["function"]["name"] + ) + print( + "Tool successfully submitted:", functions.definitions[0]["function"]["name"] + ) # delete agent and close client await client.agents.delete_agent(agent.id) @@ -224,11 +236,13 @@ async def test_update_agent(self, **kwargs): assert 
isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id # update agent and confirm changes went through - await agent.update(name="my-agent2", instructions="You are helpful agent") + agent.update(name="my-agent2", instructions="You are helpful agent") assert agent.name assert agent.name == "my-agent2" @@ -284,7 +298,9 @@ async def test_create_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -308,7 +324,9 @@ async def test_get_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -407,7 +425,9 @@ async def test_create_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -417,7 +437,9 @@ async def test_create_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -435,7 +457,9 @@ async def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -445,7 +469,9 @@ async def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) message2 = await client.agents.create_message( @@ -473,7 +499,9 @@ async def test_list_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", 
agent.id) @@ -488,7 +516,9 @@ async def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message1 = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message1.id print("Created message, message ID", message1.id) messages1 = await client.agents.list_messages(thread_id=thread.id) @@ -502,7 +532,9 @@ async def test_list_messages(self, **kwargs): print("Created message, message ID", message2.id) messages2 = await client.agents.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 - assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + assert ( + messages2.data[0].id == message2.id or messages2.data[1].id == message2.id + ) message3 = await client.agents.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" @@ -531,7 +563,9 @@ async def test_get_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -541,12 +575,16 @@ async def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) # get message - message2 = await client.agents.get_message(thread_id=thread.id, message_id=message.id) + message2 = await client.agents.get_message( + thread_id=thread.id, message_id=message.id + ) assert message2.id assert message.id == message2.id print("Got message, message ID", message.id) @@ -606,7 +644,9 @@ async def test_create_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -634,7 +674,9 @@ async def test_get_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -669,7 +711,9 @@ async def test_run_status(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -679,7 +723,9 @@ async def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create 
message - message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") + message = await client.agents.create_message( + thread_id=thread.id, role="user", content="Hello, tell me a joke" + ) assert message.id print("Created message, message ID", message.id) @@ -813,7 +859,10 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): # create agent agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent", toolset=toolset + model="gpt-4o", + name="my-agent", + instructions="You are helpful agent", + toolset=toolset, ) assert agent.id print("Created agent, agent ID", agent.id) @@ -837,8 +886,13 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): # check that tools are uploaded assert run.tools - assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] - print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) + assert ( + run.tools[0]["function"]["name"] + == functions.definitions[0]["function"]["name"] + ) + print( + "Tool successfully submitted:", functions.definitions[0]["function"]["name"] + ) # check status assert run.status in [ @@ -856,9 +910,12 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): run = await client.agents.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed - if run.status == "requires_action" and run.required_action.submit_tool_outputs: + if ( + run.status == "requires_action" + and run.required_action.submit_tool_outputs + ): print("Requires action: submit tool outputs") - tool_calls = await run.required_action.submit_tool_outputs.tool_calls + tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: print( "No tool calls provided - cancelling run" @@ -867,7 +924,9 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): break # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here + tool_outputs = toolset.execute_tool_calls( + tool_calls + ) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: await client.agents.submit_tool_outputs_to_run( @@ -949,7 +1008,9 @@ async def test_create_thread_and_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -997,11 +1058,13 @@ async def test_list_run_step(self, **kwargs): time.sleep(50) # create client - client = await self.create_client(**kwargs) + client = self.create_client(**kwargs) assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -1032,9 +1095,16 @@ async def test_list_run_step(self, **kwargs): # wait for a second time.sleep(1) run = await client.agents.get_run(thread_id=thread.id, run_id=run.id) - assert run.status in ["queued", "in_progress", "requires_action", "completed"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] print("Run status:", 
run.status) - steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) + steps = await client.agents.list_run_steps( + thread_id=thread.id, run_id=run.id + ) assert steps["data"].__len__() > 0 # TODO what else should we look at? assert run.status == "completed" @@ -1055,7 +1125,9 @@ async def test_get_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") + agent = await client.agents.create_agent( + model="gpt-4o", name="my-agent", instructions="You are helpful agent" + ) assert agent.id print("Created agent, agent ID", agent.id) @@ -1091,14 +1163,21 @@ async def test_get_run_step(self, **kwargs): assert run.last_error print(run.last_error) print("FAILED HERE") - assert run.status in ["queued", "in_progress", "requires_action", "completed"] + assert run.status in [ + "queued", + "in_progress", + "requires_action", + "completed", + ] print("Run status:", run.status) # list steps, check that get_run_step works with first step_id steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = await client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) + get_step = await client.agents.get_run_step( + thread_id=thread.id, run_id=run.id, step_id=step.id + ) assert step == get_step # delete agent and close client @@ -1116,7 +1195,9 @@ async def test_create_vector_store_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_vector_store_file_id(self, **kwargs): """Test the agent with vector store creation.""" - await self._do_test_create_vector_store(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_vector_store( + file_path=self._get_data_file(), **kwargs + ) async def _do_test_create_vector_store(self, **kwargs): """Test the agent with vector store creation.""" @@ -1131,7 +1212,7 @@ async def _do_test_create_vector_store(self, **kwargs): else: ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] @@ -1145,10 +1226,11 @@ async def _do_test_create_vector_store(self, **kwargs): @recorded_by_proxy_async async def test_create_vector_store_add_file_file_id(self, **kwargs): """Test adding single file to vector store withn file ID.""" - await self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_vector_store_add_file( + file_path=self._get_data_file(), **kwargs + ) @agentClientPreparer() - @pytest.mark.skip("The CreateVectorStoreFile API is not supported yet.") @recorded_by_proxy_async async def test_create_vector_store_add_file_azure(self, **kwargs): """Test adding single file to vector store with azure asset ID.""" @@ -1166,11 +1248,13 @@ async def _do_test_create_vector_store_add_file(self, **kwargs): else: ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.agents.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) assert vector_store.id vector_store_file = await 
ai_client.agents.create_vector_store_file( vector_store_id=vector_store.id, data_sources=ds, file_id=file_id @@ -1182,10 +1266,11 @@ async def _do_test_create_vector_store_add_file(self, **kwargs): @recorded_by_proxy_async async def test_create_vector_store_batch_file_ids(self, **kwargs): """Test adding multiple files to vector store with file IDs.""" - await self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_vector_store_batch( + file_path=self._get_data_file(), **kwargs + ) @agentClientPreparer() - @pytest.mark.skip("The CreateFileBatch API is not supported yet.") @recorded_by_proxy_async async def test_create_vector_store_batch_azure(self, **kwargs): """Test adding multiple files to vector store with azure asset IDs.""" @@ -1205,19 +1290,25 @@ async def _do_test_create_vector_store_batch(self, **kwargs): file_ids = None ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") + vector_store = await ai_client.agents.create_vector_store_and_poll( + file_ids=[], name="sample_vector_store" + ) assert vector_store.id - vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + vector_store_file_batch = ( + await ai_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids + ) ) assert vector_store_file_batch.id await self._test_file_search(ai_client, vector_store, file_id) - async def _test_file_search(self, ai_client: AIProjectClient, vector_store: VectorStore, file_id: str) -> None: + async def _test_file_search( + self, ai_client: AIProjectClient, vector_store: VectorStore, file_id: str + ) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) agent = await ai_client.agents.create_agent( @@ -1236,7 +1327,9 @@ async def _test_file_search(self, ai_client: AIProjectClient, vector_store: Vect ) assert message.id, "The message was not created." 
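# Sketch (not part of the diff): `create_and_process_run`, used just below,
# amounts to creating a run and then polling `get_run` until the run leaves a
# non-terminal state. Built only from calls these tests already use; the poll
# interval and the exact terminal-state set are assumptions.
import time

def wait_for_run(agents, thread_id: str, run_id: str, interval: float = 1.0):
    """Poll until the run reaches a terminal state (sketch)."""
    run = agents.get_run(thread_id=thread_id, run_id=run_id)
    while run.status in ("queued", "in_progress", "requires_action"):
        time.sleep(interval)  # the tests above poll with time.sleep(1) as well
        run = agents.get_run(thread_id=thread_id, run_id=run_id)
    return run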
- run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) await ai_client.agents.delete_vector_store(vector_store.id) assert run.status == "completed" messages = await ai_client.agents.list_messages(thread_id=thread.id) @@ -1248,12 +1341,13 @@ async def _test_file_search(self, ai_client: AIProjectClient, vector_store: Vect await ai_client.close() @agentClientPreparer() - @pytest.mark.skip("The CreateFileBatch API is not supported yet.") + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy_async async def test_message_attachement_azure(self, **kwargs): """Test message attachment with azure ID.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_message_attachment(data_sources=[ds], **kwargs) @@ -1261,7 +1355,9 @@ async def test_message_attachement_azure(self, **kwargs): @recorded_by_proxy_async async def test_message_attachement_file_ids(self, **kwargs): """Test message attachment with file ID.""" - await self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) + await self._do_test_message_attachment( + file_path=self._get_data_file(), **kwargs + ) async def _do_test_message_attachment(self, **kwargs): """Test agent with the message attachment.""" @@ -1286,14 +1382,22 @@ async def _do_test_message_attachment(self, **kwargs): attachment = MessageAttachment( file_id=file_id, data_sources=kwargs.get("data_sources"), - tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]], + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], ) message = await ai_client.agents.create_message( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] + thread_id=thread.id, + role="user", + content="What does the attachment say?", + attachments=[attachment], ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." 
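# Sketch (not part of the diff) of the attachment wiring used a few lines
# above: MessageAttachment takes tool *definitions* rather than the helper
# objects themselves, so the tests pick the first definition off each helper.
# The import path is assumed from this SDK's models package.
from azure.ai.projects.models import (
    CodeInterpreterTool,
    FileSearchTool,
    MessageAttachment,
)

attachment = MessageAttachment(
    file_id=None,       # or the id returned by upload_file_and_poll
    data_sources=None,  # or [VectorStoreDataSource(...)] for Azure asset ids
    tools=[
        FileSearchTool().definitions[0],       # file_search tool definition
        CodeInterpreterTool().definitions[0],  # code_interpreter tool definition
    ],
)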
await self._remove_file_maybe(file_id, ai_client) await ai_client.agents.delete_agent(agent.id) @@ -1311,14 +1415,15 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ VectorStoreConfigurations( - store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), ) ] ) @@ -1332,7 +1437,9 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert agent.id - thread = await ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) + thread = await ai_client.agents.create_thread( + tool_resources=ToolResources(file_search=fs) + ) assert thread.id # create message message = await ai_client.agents.create_message( @@ -1340,7 +1447,9 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1348,19 +1457,25 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): await ai_client.close() @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy_async async def test_create_assistant_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_assistant_with_interpreter( + data_sources=[ds], **kwargs ) - await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @agentClientPreparer() @recorded_by_proxy_async async def test_create_assistant_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - await self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_assistant_with_interpreter( + file_path=self._get_data_file(), **kwargs + ) async def _do_test_create_assistant_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1378,7 +1493,8 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = file.id cdr = CodeInterpreterToolResource( - file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment @@ -1399,7 +1515,9 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." 
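# Sketch (not part of the diff) of the resource wiring this test repeats:
# exactly one of file_ids / data_sources is populated, and the result is
# attached through ToolResources. Import path assumed from this SDK's models
# package.
from typing import List, Optional

from azure.ai.projects.models import (
    CodeInterpreterToolResource,
    ToolResources,
    VectorStoreDataSource,
)

def interpreter_resources(
    file_id: Optional[str] = None,
    data_sources: Optional[List[VectorStoreDataSource]] = None,
) -> ToolResources:
    """Build ToolResources for the code interpreter (sketch)."""
    cdr = CodeInterpreterToolResource(
        file_ids=[file_id] if file_id else None,
        data_sources=data_sources,
    )
    return ToolResources(code_interpreter=cdr)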
- run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1408,11 +1526,13 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): assert len(messages), "No messages were created" @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy_async async def test_create_thread_with_interpreter_azure(self, **kwargs): """Test Create assistant with code interpreter with azure asset ids.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) await self._do_test_create_thread_with_interpreter(data_sources=[ds], **kwargs) @@ -1420,7 +1540,9 @@ async def test_create_thread_with_interpreter_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_thread_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - await self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_thread_with_interpreter( + file_path=self._get_data_file(), **kwargs + ) async def _do_test_create_thread_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1438,7 +1560,8 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = file.id cdr = CodeInterpreterToolResource( - file_ids=[file_id] if file_id else None, data_sources=kwargs.get("data_sources") + file_ids=[file_id] if file_id else None, + data_sources=kwargs.get("data_sources"), ) tr = ToolResources(code_interpreter=cdr) # notice that CodeInterpreter must be enabled in the agent creation, otherwise the agent will not be able to see the file attachment @@ -1458,7 +1581,9 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1476,14 +1601,15 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ds = [ VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], + asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] fs = FileSearchToolResource( vector_stores=[ VectorStoreConfigurations( - store_name="my_vector_store", store_configuration=VectorStoreConfiguration(data_sources=ds) + store_name="my_vector_store", + store_configuration=VectorStoreConfiguration(data_sources=ds), ) ] ) @@ -1505,7 +1631,9 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert message.id, "The message was not created." 
- run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1513,19 +1641,25 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): await ai_client.close() @agentClientPreparer() + @pytest.mark.skip("The API is not supported yet.") @recorded_by_proxy_async async def test_create_attachment_in_thread_azure(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" ds = VectorStoreDataSource( - storage_uri=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET + asset_identifier=kwargs["azure_ai_projects_data_path"], + asset_type=VectorStoreDataSourceAssetType.URI_ASSET, + ) + await self._do_test_create_attachment_in_thread_azure( + data_sources=[ds], **kwargs ) - await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @agentClientPreparer() @recorded_by_proxy_async async def test_create_attachment_in_thread_file_ids(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - await self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) + await self._do_test_create_attachment_in_thread_azure( + file_path=self._get_data_file(), **kwargs + ) async def _do_test_create_attachment_in_thread_azure(self, **kwargs): # create client @@ -1547,13 +1681,22 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): attachment = MessageAttachment( file_id=file_id, data_sources=kwargs.get("data_sources"), - tools=[FileSearchTool().definitions[0], CodeInterpreterTool().definitions[0]], + tools=[ + FileSearchTool().definitions[0], + CodeInterpreterTool().definitions[0], + ], + ) + message = ThreadMessageOptions( + role="user", + content="What does the attachment say?", + attachments=[attachment], ) - message = ThreadMessageOptions(role="user", content="What does the attachment say?", attachments=[attachment]) thread = await ai_client.agents.create_thread(messages=[message]) assert thread.id - run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + run = await ai_client.agents.create_and_process_run( + thread_id=thread.id, assistant_id=agent.id + ) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1570,7 +1713,9 @@ async def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> str: return file.id return None - async def _remove_file_maybe(self, file_id: str, ai_client: AIProjectClient) -> None: + async def _remove_file_maybe( + self, file_id: str, ai_client: AIProjectClient + ) -> None: """Remove file if we have file ID.""" if file_id: await ai_client.agents.delete_file(file_id) From d920d444de88c0ee5341190b08ca1a0879c59f17 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Mon, 11 Nov 2024 16:31:43 -0600 Subject: [PATCH 108/138] agents telemetry pyright fixes (#38457) Co-authored-by: Marko Hietala --- .../agents/_ai_agents_instrumentor.py | 169 +++++++++++++----- 1 file changed, 125 insertions(+), 44 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py 
b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 8b7d6c8077f1..898cbd51e6fe 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -381,7 +381,7 @@ def _add_instructions_event( attributes[GEN_AI_EVENT_CONTENT] = json.dumps(event_body) span.span_instance.add_event(name=GEN_AI_SYSTEM_MESSAGE, attributes=attributes) - def _get_role(self, role: Union[str, MessageRole]) -> str: + def _get_role(self, role: Optional[Union[str, MessageRole]]) -> str: if role is None or role is _Unset: return "user" @@ -416,11 +416,11 @@ def _add_tool_assistant_message_event(self, span, step: RunStep) -> None: attributes[GEN_AI_EVENT_CONTENT] = json.dumps({"tool_calls": tool_calls}) span.span_instance.add_event(name="gen_ai.assistant.message", attributes=attributes) - def set_end_run(self, span: "AbstractSpan", run: ThreadRun) -> None: - if span and span.span_instance.is_recording: + def set_end_run(self, span: "AbstractSpan", run: Optional[ThreadRun]) -> None: + if run and span and span.span_instance.is_recording: span.add_attribute(GEN_AI_THREAD_RUN_STATUS, run.status) span.add_attribute(GEN_AI_RESPONSE_MODEL, run.model) - if run.usage: + if run and run.usage: span.add_attribute(GEN_AI_USAGE_INPUT_TOKENS, run.usage.prompt_tokens) span.add_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, run.usage.completion_tokens) @@ -446,8 +446,8 @@ def start_thread_run_span( self, operation_name: OperationName, project_name: str, - thread_id: str, - agent_id: str, + thread_id: Optional[str] = None, + agent_id: Optional[str] = None, model: Optional[str] = None, instructions: Optional[str] = None, additional_instructions: Optional[str] = None, @@ -484,9 +484,9 @@ def start_thread_run_span( def start_submit_tool_outputs_span( self, project_name: str, - thread_id: str, - run_id: str, - tool_outputs: List[ToolOutput] = _Unset, + thread_id: Optional[str] = None, + run_id: Optional[str] = None, + tool_outputs: Optional[List[ToolOutput]] = None, event_handler: Optional[AgentEventHandler] = None, ) -> "Optional[AbstractSpan]": @@ -498,8 +498,10 @@ def start_submit_tool_outputs_span( self._add_tool_message_events(span, tool_outputs) return span - def _add_tool_message_events(self, span: "Optional[AbstractSpan]", tool_outputs: List[ToolOutput]) -> bool: - if span and span.span_instance.is_recording: + def _add_tool_message_events( + self, span: "Optional[AbstractSpan]", tool_outputs: Optional[List[ToolOutput]] + ) -> bool: + if span and span.span_instance.is_recording and tool_outputs: for tool_output in tool_outputs: body = {"content": tool_output["output"], "id": tool_output["tool_call_id"]} span.span_instance.add_event("gen_ai.tool.message", {"gen_ai.event.content": json.dumps(body)}) @@ -510,7 +512,7 @@ def _add_tool_message_events(self, span: "Optional[AbstractSpan]", tool_outputs: def start_create_agent_span( self, project_name: str, - model: str = _Unset, + model: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, @@ -552,7 +554,7 @@ def start_create_thread_span( return span - def start_list_messages_span(self, project_name: str, thread_id: str) -> "Optional[AbstractSpan]": + def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = None) -> "Optional[AbstractSpan]": return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) def 
trace_create_agent(self, function, *args, **kwargs): @@ -568,7 +570,7 @@ def trace_create_agent(self, function, *args, **kwargs): top_p = kwargs.get("top_p") response_format = kwargs.get("response_format") - with self.start_create_agent_span( + span = self.start_create_agent_span( project_name=project_name, name=name, model=model, @@ -580,7 +582,12 @@ def trace_create_agent(self, function, *args, **kwargs): temperature=temperature, top_p=top_p, response_format=response_format, - ) as span: + ) + + if span == None: + return function(*args, **kwargs) + + with span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_AGENT_ID, result.id) @@ -612,7 +619,7 @@ async def trace_create_agent_async(self, function, *args, **kwargs): top_p = kwargs.get("top_p") response_format = kwargs.get("response_format") - with self.start_create_agent_span( + span = self.start_create_agent_span( project_name=project_name, name=name, model=model, @@ -624,7 +631,12 @@ async def trace_create_agent_async(self, function, *args, **kwargs): temperature=temperature, top_p=top_p, response_format=response_format, - ) as span: + ) + + if span == None: + return await function(*args, **kwargs) + + with span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_AGENT_ID, result.id) @@ -647,7 +659,12 @@ def trace_create_thread(self, function, *args, **kwargs): project_name = args[0]._config.project_name messages = kwargs.get("messages") - with self.start_create_thread_span(project_name=project_name, messages=messages) as span: + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span == None: + return function(*args, **kwargs) + + with span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) @@ -670,7 +687,12 @@ async def trace_create_thread_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name messages = kwargs.get("messages") - with self.start_create_thread_span(project_name=project_name, messages=messages) as span: + span = self.start_create_thread_span(project_name=project_name, messages=messages) + + if span == None: + return await function(*args, **kwargs) + + with span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_THREAD_ID, result.get("id")) @@ -691,15 +713,19 @@ async def trace_create_thread_async(self, function, *args, **kwargs): def trace_create_message(self, function, *args, **kwargs): project_name = args[0]._config.project_name - messages = kwargs.get("messages") thread_id = kwargs.get("thread_id") role = kwargs.get("role") content = kwargs.get("content") attachments = kwargs.get("attachments") - with self.start_create_message_span( + span = self.start_create_message_span( project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments - ) as span: + ) + + if span == None: + return function(*args, **kwargs) + + with span: try: result = function(*args, **kwargs) span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) @@ -720,15 +746,19 @@ def trace_create_message(self, function, *args, **kwargs): async def trace_create_message_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name - messages = kwargs.get("messages") thread_id = kwargs.get("thread_id") role = kwargs.get("role") content = kwargs.get("content") attachments = kwargs.get("attachments") - with self.start_create_message_span( + span = self.start_create_message_span( project_name=project_name, thread_id=thread_id, 
content=content, role=role, attachments=attachments - ) as span: + ) + + if span == None: + return await function(*args, **kwargs) + + with span: try: result = await function(*args, **kwargs) span.add_attribute(GEN_AI_MESSAGE_ID, result.get("id")) @@ -762,7 +792,7 @@ def trace_create_run(self, operation_name, function, *args, **kwargs): max_completion_tokens = kwargs.get("max_completion_tokens") response_format = kwargs.get("response_format") - with self.start_thread_run_span( + span = self.start_thread_run_span( operation_name, project_name, thread_id, @@ -777,7 +807,12 @@ def trace_create_run(self, operation_name, function, *args, **kwargs): max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, response_format=response_format, - ) as span: + ) + + if span == None: + return function(*args, **kwargs) + + with span: try: result = function(*args, **kwargs) self.set_end_run(span, result) @@ -811,7 +846,7 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs max_completion_tokens = kwargs.get("max_completion_tokens") response_format = kwargs.get("response_format") - with self.start_thread_run_span( + span = self.start_thread_run_span( operation_name, project_name, thread_id, @@ -826,7 +861,12 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, response_format=response_format, - ) as span: + ) + + if span == None: + return await function(*args, **kwargs) + + with span: try: result = await function(*args, **kwargs) if span.span_instance.is_recording: @@ -858,15 +898,20 @@ def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): tool_outputs = kwargs.get("tool_outputs") event_handler = kwargs.get("event_handler") - with self.start_submit_tool_outputs_span( + span = self.start_submit_tool_outputs_span( project_name=project_name, thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, event_handler=event_handler, - ) as span: + ) + + if span == None: + return function(*args, **kwargs) + + with span: try: - if stream: + if stream and event_handler: kwargs["event_handler"] = self.wrap_handler(event_handler, span) result = function(*args, **kwargs) @@ -894,13 +939,18 @@ async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwarg tool_outputs = kwargs.get("tool_outputs") event_handler = kwargs.get("event_handler") - with self.start_submit_tool_outputs_span( + span = self.start_submit_tool_outputs_span( project_name=project_name, thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs, event_handler=event_handler, - ) as span: + ) + + if span is None: + return await function(*args, **kwargs) + + with span: try: if stream: kwargs["event_handler"] = self.wrap_handler(event_handler, span) @@ -928,6 +978,10 @@ def trace_handle_submit_tool_outputs(self, function, *args, **kwargs): if event_handler is None: event_handler = args[2] span = getattr(event_handler, "span", None) + + if span is None: + return function(*args, **kwargs) + with span.change_context(span.span_instance): try: result = function(*args, **kwargs) @@ -951,6 +1005,10 @@ async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs if event_handler is None: event_handler = args[2] span = getattr(event_handler, "span", None) + + if span is None: + return await function(*args, **kwargs) + with span.change_context(span.span_instance): try: result = await function(*args, **kwargs) @@ -1003,6 +1061,9 @@ def 
trace_create_stream(self, function, *args, **kwargs): response_format=response_format, ) + if span is None: + return function(*args, **kwargs) + # TODO: how to keep span active in the current context without existing? # TODO: dummy span for none with span.change_context(span.span_instance): @@ -1058,6 +1119,9 @@ async def trace_create_stream_async(self, function, *args, **kwargs): response_format=response_format, ) + if span is None: + return await function(*args, **kwargs) + # TODO: how to keep span active in the current context without existing? # TODO: dummy span for none with span.change_context(span.span_instance): @@ -1083,7 +1147,12 @@ def trace_list_messages(self, function, *args, **kwargs): project_name = args[0]._config.project_name thread_id = kwargs.get("thread_id") - with self.start_list_messages_span(project_name=project_name, thread_id=thread_id) as span: + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return function(*args, **kwargs) + + with span: try: result = function(*args, **kwargs) for message in result.data: @@ -1108,7 +1177,12 @@ async def trace_list_messages_async(self, function, *args, **kwargs): project_name = args[0]._config.project_name thread_id = kwargs.get("thread_id") - with self.start_list_messages_span(project_name=project_name, thread_id=thread_id) as span: + span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) + + if span is None: + return await function(*args, **kwargs) + + with span: try: result = await function(*args, **kwargs) for message in result.data: @@ -1142,21 +1216,23 @@ def handle_run_stream_exit(self, function, *args, **kwargs): ): agent_run_stream.event_handler.__exit__(exc_type, exc_val, exc_tb) - def wrap_handler(self, handler: "AgentEventHandler", span: "AbstractSpan") -> "AgentEventHandler": + def wrap_handler( + self, handler: "Optional[AgentEventHandler]" = None, span: "Optional[AbstractSpan]" = None + ) -> "Optional[AgentEventHandler]": if isinstance(handler, _AgentEventHandlerTraceWrapper): return handler if span and span.span_instance.is_recording: - return _AgentEventHandlerTraceWrapper(handler, self, span) + return _AgentEventHandlerTraceWrapper(self, span, handler) return handler def start_create_message_span( self, project_name: str, - thread_id: str, - content: str, - role: Union[str, MessageRole] = _Unset, + thread_id: Optional[str] = None, + content: Optional[str] = None, + role: Optional[Union[str, MessageRole]] = None, attachments: Optional[List[MessageAttachment]] = None, ) -> "Optional[AbstractSpan]": role_str = self._get_role(role) @@ -1500,7 +1576,10 @@ def _is_content_recording_enabled(self) -> bool: class _AgentEventHandlerTraceWrapper(AgentEventHandler): def __init__( - self, inner_handler: AgentEventHandler, instrumentor: _AIAgentsInstrumentorPreview, span: "AbstractSpan" + self, + instrumentor: _AIAgentsInstrumentorPreview, + span: "AbstractSpan", + inner_handler: Optional[AgentEventHandler] = None, ): super().__init__() self.span = span @@ -1562,8 +1641,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.ended = True self.instrumentor.set_end_run(self.span, self.last_run) - if self.last_run.last_error: - self.span.set_status(StatusCode.ERROR, self.last_run.last_error.message) + if self.last_run and self.last_run.last_error: + self.span.set_status( + StatusCode.ERROR, self.last_run.last_error.message + ) # pyright: ignore [reportPossiblyUnboundVariable] self.span.add_attribute(ERROR_TYPE, 
self.last_run.last_error.code) self.span.__exit__(exc_type, exc_val, exc_tb) From 7a66f7e704fe1585ea69fd410a0fcf630d83aaa2 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:04:47 -0800 Subject: [PATCH 109/138] Fix pyright for agents. (#38464) * Fix as if python version is 3.8 * Fixed * Add custom config * Move line, ignoring import --- .../azure/ai/projects/models/_patch.py | 115 +++++++++--------- .../azure/ai/projects/operations/_patch.py | 4 +- .../agents/_ai_agents_instrumentor.py | 7 +- sdk/ai/azure-ai-projects/pyrightconfig.json | 16 +++ ...tore_batch_enterprise_file_search_async.py | 2 +- 5 files changed, 80 insertions(+), 64 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/pyrightconfig.json diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index cb155bc4f37e..4800f5dd2ce0 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -72,6 +72,8 @@ logger = logging.getLogger(__name__) +StreamEventData = Union[MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep, None] + def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: """ Remove the parameters, non present in class public fields; return shallow copy of a dictionary. @@ -94,7 +96,7 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st return new_params -def _safe_instantiate(model_class: Type, parameters: Union[str, Dict[str, Any]]) -> Any: +def _safe_instantiate(model_class: Type, parameters: Union[str, Dict[str, Any]]) -> Union[str, StreamEventData]: """ Instantiate class with the set of parameters from the server. 
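# Sketch (not part of the diff) of the behavior the typed `_safe_instantiate`
# above keeps: parameters unknown to the model class are filtered out via
# `_filter_parameters` before instantiation, and non-dict payloads are
# returned unchanged. The payload below is an illustrative assumption.
parsed = {"id": "run_abc", "status": "queued", "unknown_server_field": 1}
run = _safe_instantiate(ThreadRun, parsed)        # unknown key dropped, ThreadRun built
raw = _safe_instantiate(ThreadRun, "not a dict")  # a plain str comes straight back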
@@ -104,7 +106,7 @@ def _safe_instantiate(model_class: Type, parameters: Union[str, Dict[str, Any]]) """ if not isinstance(parameters, dict): return parameters - return model_class(**_filter_parameters(model_class, parameters)) + return cast(StreamEventData, model_class(**_filter_parameters(model_class, parameters))) class ConnectionProperties: @@ -928,10 +930,7 @@ async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: """Handle any unhandled event types.""" -StreamEventData = Union[MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep, None] - - -class AsyncAgentRunStream(AsyncIterator[Tuple[str, StreamEventData]]): +class AsyncAgentRunStream(AsyncIterator[Tuple[str, Union[str, StreamEventData]]]): def __init__( self, response_iterator: AsyncIterator[bytes], @@ -957,7 +956,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): def __aiter__(self): return self - async def __anext__(self) -> Tuple[str, StreamEventData]: + async def __anext__(self) -> Tuple[str, Union[str, StreamEventData]]: while True: try: chunk = await self.response_iterator.__anext__() @@ -973,9 +972,9 @@ async def __anext__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return await self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, str]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Union[str, StreamEventData], str]: event_lines = event_data_str.strip().split("\n") - event_type = None + event_type: Optional[str] = None event_data = "" error_string = "" @@ -1001,44 +1000,44 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, # Map to the appropriate class instance if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, + AgentStreamEvent.THREAD_RUN_CREATED.value, + AgentStreamEvent.THREAD_RUN_QUEUED.value, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS.value, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, + AgentStreamEvent.THREAD_RUN_COMPLETED.value, + AgentStreamEvent.THREAD_RUN_FAILED.value, + AgentStreamEvent.THREAD_RUN_CANCELLING.value, + AgentStreamEvent.THREAD_RUN_CANCELLED.value, + AgentStreamEvent.THREAD_RUN_EXPIRED.value, }: event_data_obj = _safe_instantiate(ThreadRun, parsed_data) elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + AgentStreamEvent.THREAD_RUN_STEP_CREATED.value, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED.value, + AgentStreamEvent.THREAD_RUN_STEP_FAILED.value, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED.value, + AgentStreamEvent.THREAD_RUN_STEP_EXPIRED.value, }: event_data_obj = _safe_instantiate(RunStep, parsed_data) elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + AgentStreamEvent.THREAD_MESSAGE_CREATED.value, + 
AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED.value, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, }: event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA.value: event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: - event_data_obj = parsed_data + event_data_obj = "" error_string = str(parsed_data) return event_type, event_data_obj, error_string - async def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: + async def _process_event(self, event_data_str: str) -> Tuple[str, Union[str, StreamEventData]]: event_type, event_data_obj, error_string = self._parse_event_data(event_data_str) if ( @@ -1082,7 +1081,7 @@ async def until_done(self) -> None: pass -class AgentRunStream(Iterator[Tuple[str, StreamEventData]]): +class AgentRunStream(Iterator[Tuple[str, Union[str, StreamEventData]]]): def __init__( self, response_iterator: Iterator[bytes], @@ -1106,7 +1105,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def __iter__(self): return self - def __next__(self) -> Tuple[str, StreamEventData]: + def __next__(self) -> Tuple[str, Union[str, StreamEventData]]: if self.done: raise StopIteration while True: @@ -1124,7 +1123,7 @@ def __next__(self) -> Tuple[str, StreamEventData]: event_data_str, self.buffer = self.buffer.split("\n\n", 1) return self._process_event(event_data_str) - def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, str]: + def _parse_event_data(self, event_data_str: str) -> Tuple[str, Union[str, StreamEventData], str]: event_lines = event_data_str.strip().split("\n") event_type = None event_data = "" @@ -1150,44 +1149,44 @@ def _parse_event_data(self, event_data_str: str) -> Tuple[str, StreamEventData, # Map to the appropriate class instance if event_type in { - AgentStreamEvent.THREAD_RUN_CREATED, - AgentStreamEvent.THREAD_RUN_QUEUED, - AgentStreamEvent.THREAD_RUN_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION, - AgentStreamEvent.THREAD_RUN_COMPLETED, - AgentStreamEvent.THREAD_RUN_FAILED, - AgentStreamEvent.THREAD_RUN_CANCELLING, - AgentStreamEvent.THREAD_RUN_CANCELLED, - AgentStreamEvent.THREAD_RUN_EXPIRED, + AgentStreamEvent.THREAD_RUN_CREATED.value, + AgentStreamEvent.THREAD_RUN_QUEUED.value, + AgentStreamEvent.THREAD_RUN_IN_PROGRESS.value, + AgentStreamEvent.THREAD_RUN_REQUIRES_ACTION.value, + AgentStreamEvent.THREAD_RUN_COMPLETED.value, + AgentStreamEvent.THREAD_RUN_FAILED.value, + AgentStreamEvent.THREAD_RUN_CANCELLING.value, + AgentStreamEvent.THREAD_RUN_CANCELLED.value, + AgentStreamEvent.THREAD_RUN_EXPIRED.value, }: event_data_obj = _safe_instantiate(ThreadRun, parsed_data) elif event_type in { - AgentStreamEvent.THREAD_RUN_STEP_CREATED, - AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS, - AgentStreamEvent.THREAD_RUN_STEP_COMPLETED, - AgentStreamEvent.THREAD_RUN_STEP_FAILED, - AgentStreamEvent.THREAD_RUN_STEP_CANCELLED, - AgentStreamEvent.THREAD_RUN_STEP_EXPIRED, + AgentStreamEvent.THREAD_RUN_STEP_CREATED.value, + AgentStreamEvent.THREAD_RUN_STEP_IN_PROGRESS.value, + AgentStreamEvent.THREAD_RUN_STEP_COMPLETED.value, + AgentStreamEvent.THREAD_RUN_STEP_FAILED.value, + AgentStreamEvent.THREAD_RUN_STEP_CANCELLED.value, + 
AgentStreamEvent.THREAD_RUN_STEP_EXPIRED.value, }: event_data_obj = _safe_instantiate(RunStep, parsed_data) elif event_type in { - AgentStreamEvent.THREAD_MESSAGE_CREATED, - AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS, - AgentStreamEvent.THREAD_MESSAGE_COMPLETED, - AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE, + AgentStreamEvent.THREAD_MESSAGE_CREATED.value, + AgentStreamEvent.THREAD_MESSAGE_IN_PROGRESS.value, + AgentStreamEvent.THREAD_MESSAGE_COMPLETED.value, + AgentStreamEvent.THREAD_MESSAGE_INCOMPLETE.value, }: event_data_obj = _safe_instantiate(ThreadMessage, parsed_data) - elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA: + elif event_type == AgentStreamEvent.THREAD_MESSAGE_DELTA.value: event_data_obj = _safe_instantiate(MessageDeltaChunk, parsed_data) - elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA: + elif event_type == AgentStreamEvent.THREAD_RUN_STEP_DELTA.value: event_data_obj = _safe_instantiate(RunStepDeltaChunk, parsed_data) else: - event_data_obj = parsed_data + event_data_obj = "" error_string = str(parsed_data) return event_type, event_data_obj, error_string - def _process_event(self, event_data_str: str) -> Tuple[str, StreamEventData]: + def _process_event(self, event_data_str: str) -> Tuple[str, Union[str, StreamEventData]]: event_type, event_data_obj, error_string = self._parse_event_data(event_data_str) if ( diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index e272cf0328de..5d5781a73bcf 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -418,14 +418,14 @@ def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter try: from opentelemetry.sdk._logs.export import ConsoleLogExporter + return ConsoleLogExporter() except ModuleNotFoundError as ex: # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. 
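# Sketch (not part of the diff): the models/_patch.py hunks above switch every
# membership test to `.value` so that a plain `str` event type satisfies
# pyright. With a plain Enum the distinction also matters at runtime, as this
# one-member stand-in shows (the real AgentStreamEvent mixes in str, so its
# runtime behavior may differ):
from enum import Enum

class StreamEvent(Enum):
    THREAD_RUN_CREATED = "thread.run.created"

event_type = "thread.run.created"                            # as read off the wire
assert event_type != StreamEvent.THREAD_RUN_CREATED          # enum member: no match
assert event_type == StreamEvent.THREAD_RUN_CREATED.value    # .value: match
assert event_type in {StreamEvent.THREAD_RUN_CREATED.value}  # the set test used above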
logger.warning( "Failed to configure OpenTelemetry logging.", exc_info=ex ) - - return ConsoleLogExporter() + return None else: raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 898cbd51e6fe..01d37b7e1912 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -63,7 +63,7 @@ # pylint: disable = no-name-in-module from opentelemetry.trace import Span, StatusCode - from azure.core.tracing import AbstractSpan, SpanKind # type: ignore + from azure.core.tracing import AbstractSpan # type: ignore _tracing_library_available = True except ModuleNotFoundError: @@ -1643,8 +1643,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): if self.last_run and self.last_run.last_error: self.span.set_status( - StatusCode.ERROR, self.last_run.last_error.message - ) # pyright: ignore [reportPossiblyUnboundVariable] + StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] + self.last_run.last_error.message + ) self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) self.span.__exit__(exc_type, exc_val, exc_tb) diff --git a/sdk/ai/azure-ai-projects/pyrightconfig.json b/sdk/ai/azure-ai-projects/pyrightconfig.json new file mode 100644 index 000000000000..4c1e55d0fb79 --- /dev/null +++ b/sdk/ai/azure-ai-projects/pyrightconfig.json @@ -0,0 +1,16 @@ +{ + "reportTypeCommentUsage": true, + "reportMissingImports": false, + "pythonVersion": "3.11", + "exclude": [ + "**/downloaded", + "**/sample_agents_vector_store_batch_enterprise_file_search_async.py", + "**/sample_agents_with_file_search_attachment.py", + "**/sample_agents_with_code_interpreter_file_attachment.py", + "**/sample_agents_code_interpreter_attachment_enterprise_search.py", + "**/sample_agents_with_file_search_attachment_async.py", + "**/sample_agents_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_agents_code_interpreter_attachment_enterprise_search_async.py", + "**/sample_agents_code_interpreter_attachment_async.py" + ] +} \ No newline at end of file diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index f0b655278f0a..fa1b799aed58 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -92,7 +92,7 @@ async def main(): ) print(f"Created run, run ID: {run.id}") - await file_search_tool.remove_vector_store(vector_store.id) + file_search_tool.remove_vector_store(vector_store.id) print( f"Removed vector store from file search, vector store ID: {vector_store.id}" ) From a629bfac672713faa7e933c18e3eb32b47cfe309 Mon Sep 17 00:00:00 2001 From: Glenn Harper <64209257+glharper@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:57:14 -0800 Subject: [PATCH 110/138] fix pip install instruction (#38480) --- .../async_samples/sample_agents_code_interpreter_async.py | 2 +- .../sample_agents_code_interpreter_attachment_async.py | 2 +- 
...gents_code_interpreter_attachment_enterprise_search_async.py | 2 +- .../async_samples/sample_agents_run_with_toolset_async.py | 2 +- ...le_agents_vector_store_batch_enterprise_file_search_async.py | 2 +- .../sample_agents_vector_store_enterprise_file_search_async.py | 2 +- .../sample_agents_vector_store_file_search_async.py | 2 +- .../samples/agents/sample_agents_azure_ai_search.py | 2 +- .../samples/agents/sample_agents_bing_grounding.py | 2 +- ...mple_agents_code_interpreter_attachment_enterprise_search.py | 2 +- .../samples/agents/sample_agents_enterprise_file_search.py | 2 +- .../sample_agents_vector_store_batch_enterprise_file_search.py | 2 +- .../samples/agents/sample_agents_vector_store_file_search.py | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py index ebe2bc07b102..15d1a6626eec 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py index 36b63adae474..477ae0c49852 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py index ce06f5119303..2c609387c775 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_code_interpreter_attachment_enterprise_search_async.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
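The corrected commands above use the package's distribution name, which is hyphenated; the import path stays dotted. A minimal sketch of the distinction, assuming the package is installed:

    # The PyPI distribution name uses hyphens; the Python import path uses dots.
    import importlib.metadata

    import azure.ai.projects  # import path (dots)

    # Look up the installed version by distribution name (hyphens).
    print(importlib.metadata.version("azure-ai-projects"))

(pip normalizes dots and hyphens to the same distribution name, so the old spelling still resolved; the change aligns the sample docs with the canonical name.)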
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py index db888c32239b..fa431dede97b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_run_with_toolset_async.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index fa1b799aed58..0fe1e6f5a9e0 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity azure-ai-ml + pip install azure-ai-projects azure-identity azure-ai-ml Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py index 0d3e543e3cf3..2100e70e6719 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_enterprise_file_search_async.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity azure-ai-ml + pip install azure-ai-projects azure-identity azure-ai-ml Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py index ef3f0477a6a4..1ec109a8501e 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_file_search_async.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
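Each sample above assumes the same bootstrap: read PROJECT_CONNECTION_STRING from the environment and build the client from it. A minimal sketch of that pattern, using the `from_connection_string` factory patched in earlier in this series:

    import os

    from azure.ai.projects import AIProjectClient
    from azure.identity import DefaultAzureCredential

    # Build the project client from the connection string, as the sample docstrings assume.
    project_client = AIProjectClient.from_connection_string(
        credential=DefaultAzureCredential(),
        conn_str=os.environ["PROJECT_CONNECTION_STRING"],
    )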
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py index f23271b0fbb6..a85f7432c846 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py index a63364218c5f..981b4456ad87 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_bing_grounding.py @@ -15,7 +15,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py index 7c810737a3a7..337d13ba5353 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_code_interpreter_attachment_enterprise_search.py @@ -14,7 +14,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py index 2cce6f7e6939..00398ce98f34 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_enterprise_file_search.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity azure-ai-ml + pip install azure-ai-projects azure-identity azure-ai-ml Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. 
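The enterprise file search samples pull in `azure-ai-ml` as an extra dependency because they index a registered data asset rather than a locally uploaded file. A rough sketch of that flow; the names used here (`VectorStoreDataSource`, `VectorStoreDataSourceAssetType`, `create_vector_store_and_poll`) are assumed from this package's models and agents operations, and the asset URI is hypothetical:

    from azure.ai.projects.models import (
        VectorStoreDataSource,
        VectorStoreDataSourceAssetType,
    )

    # Hypothetical azureml:// URI of a data asset uploaded with azure-ai-ml.
    asset_uri = "azureml://datastores/workspaceblobstore/paths/product_info.md"

    # Back the vector store with the data asset instead of an uploaded file.
    data_source = VectorStoreDataSource(
        asset_identifier=asset_uri,
        asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
    )
    vector_store = project_client.agents.create_vector_store_and_poll(
        data_sources=[data_source], name="sample_vector_store"
    )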
diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py index e84648f06175..1ac646caf595 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_batch_enterprise_file_search.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity azure-ai-ml + pip install azure-ai-projects azure-identity azure-ai-ml Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py index eacf51437485..3c93599ed172 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_vector_store_file_search.py @@ -13,7 +13,7 @@ Before running the sample: - pip install azure.ai.projects azure-identity + pip install azure-ai-projects azure-identity Set this environment variables with your own values: PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. From 80f7311f3757cbf820adea65001d174ce2a7778f Mon Sep 17 00:00:00 2001 From: kdestin <101366538+kdestin@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:29:39 -0500 Subject: [PATCH 111/138] [ai] fix(pylint): Resolve `C0301`, `C4739`, `C4740`, `C4741`, `C4742`, `C4743` for `azure-ai-projects` (#38459) * fix(pylint): Resolve C0301(line-too-long) * fix(pylint): Resolve C4743(docstring-should-be-keyword) * fix(pylint): Resolve C4739(docstring-missing-param) * fix(pylint): Resolve C4740(docstring-missing-type) * fix(pylint): Resolve C4741(docstring-missing-return) * fix(pylint): Resolve C4742(docstring-missing-rtype) * style: Run black --- .../azure/ai/projects/_patch.py | 20 +- .../azure/ai/projects/aio/_patch.py | 12 +- .../ai/projects/aio/operations/_patch.py | 163 +++++++++------- .../azure/ai/projects/models/_patch.py | 176 ++++++++++++++---- .../azure/ai/projects/operations/_patch.py | 164 +++++++++------- .../agents/_ai_agents_instrumentor.py | 11 +- 6 files changed, 365 insertions(+), 181 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index f045c051ce7f..42231b5deca4 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -57,9 +57,10 @@ def __init__( kwargs3 = kwargs.copy() # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have - # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} - _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long + # The AppInsights resource URL is not known at this point. We need to get it from the + # AzureML "Workspace - Get" REST API call. 
It will have the form: + # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} + _endpoint0 = "https://management.azure.com" self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, @@ -91,7 +92,13 @@ def __init__( self._client0: PipelineClient = PipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) # For Endpoints operations (listing connections, getting connection properties, getting project properties) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + _endpoint1 = ( + "https://management.azure.com/" + + f"subscriptions/{subscription_id}/" + + f"resourceGroups/{resource_group_name}/" + + "providers/Microsoft.MachineLearningServices/" + + f"workspaces/{project_name}" + ) self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, @@ -220,7 +227,10 @@ def from_connection_string(cls, conn_str: str, credential: "TokenCredential", ** """ Create an AIProjectClient from a connection string. - :param conn_str: The connection string, copied from your AI Studio project. + :param str conn_str: The connection string, copied from your AI Studio project. + :param TokenCredential credential: Credential used to authenticate requests to the service. + :return: An AIProjectClient instance. + :rtype: AIProjectClient """ if not conn_str: raise ValueError("Connection string is required") diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 61fc91d2fbb7..7b163a8c6c5a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -63,8 +63,9 @@ def __init__( kwargs3 = kwargs.copy() # For getting AppInsights connection string from the AppInsights resource. - # The AppInsights resource URL is not known at this point. We need to get it from the AzureML "Workspace - Get" REST API call. It will have - # the form: https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} + # The AppInsights resource URL is not known at this point. We need to get it from the + # AzureML "Workspace - Get" REST API call. 
It will have the form: + # https://management.azure.com/subscriptions/{appinsights_subscription_id}/resourceGroups/{appinsights_resource_group_name}/providers/microsoft.insights/components/{appinsights_resource_name} # pylint: disable=line-too-long _endpoint0 = "https://management.azure.com" # pylint: disable=line-too-long self._config0: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, @@ -97,7 +98,7 @@ def __init__( self._client0: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint0, policies=_policies0, **kwargs0) # For Endpoints operations (enumerating connections, getting SAS tokens) - _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" # pylint: disable=line-too-long + _endpoint1 = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{project_name}" self._config1: AIProjectClientConfiguration = AIProjectClientConfiguration( endpoint=endpoint, subscription_id=subscription_id, @@ -226,7 +227,10 @@ def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential """ Create an asynchronous AIProjectClient from a connection string. - :param conn_str: The connection string, copied from your AI Studio project. + :param str conn_str: The connection string, copied from your AI Studio project. + :param AsyncTokenCredential credential: Credential used to authenticate requests to the service. + :return: An AIProjectClient instance. + :rtype: AIProjectClient """ if not conn_str: raise ValueError("Connection string is required") diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 927431e8c3d7..86144a36e20a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -68,18 +68,19 @@ def __init__(self, outer_instance): @distributed_trace_async async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": - """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource. At least one AI model that supports chat completions must be deployed - in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection - does not exist. - Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package - is not installed. + """Get an authenticated asynchronous ChatCompletionsClient (from the package azure-ai-inference) for + the default Azure AI Services connected resource. + + At least one AI model that supports chat completions must be deployed in this resource. + + .. note:: + + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.ChatCompletionsClient - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.ModuleNotFoundError: + :raises ~azure.core.exceptions.ResourceNotFoundError: An Azure AI Services connection does not exist. 
+ :raises ~azure.core.exceptions.ModuleNotFoundError: The `azure-ai-inference` package is not installed. :raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -114,7 +115,8 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" + "[InferenceOperations.get_chat_completions_client]" + + " Creating ChatCompletionsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential @@ -122,12 +124,14 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" + "[InferenceOperations.get_chat_completions_client]" + + " Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" + "[InferenceOperations.get_chat_completions_client] " + + "Creating ChatCompletionsClient using SAS authentication" ) raise ValueError( "Getting chat completions client from a connection with SAS authentication is not yet supported" @@ -139,18 +143,19 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" @distributed_trace_async async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": - """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for the default - Azure AI Services connected resource. At least one AI model that supports text embeddings must be deployed - in this resource. The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. - Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Azure AI Services connection - does not exist. - Raises ~azure.core.exceptions.ModuleNotFoundError exception if the `azure-ai-inference` package - is not installed. + """Get an authenticated asynchronous EmbeddingsClient (from the package azure-ai-inference) for + the default Azure AI Services connected resource. + + At least one AI model that supports text embeddings must be deployed in this resource. + + .. note:: + + The packages `azure-ai-inference` and `aiohttp` must be installed prior to calling this method. :return: An authenticated chat completions client :rtype: ~azure.ai.inference.models.EmbeddingsClient - :raises ~azure.core.exceptions.ResourceNotFoundError: - :raises ~azure.core.exceptions.ModuleNotFoundError: + :raises ~azure.core.exceptions.ResourceNotFoundError: An Azure AI Services connection does not exist. + :raises ~azure.core.exceptions.ModuleNotFoundError: The `azure-ai-inference` package is not installed. 
:raises ~azure.core.exceptions.HttpResponseError: """ kwargs.setdefault("merge_span", True) @@ -258,7 +263,6 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** auth = "Creating AzureOpenAI using SAS authentication" logger.debug("[InferenceOperations.get_azure_openai_client] %s", auth) client = AsyncAzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider azure_ad_token_provider=get_bearer_token_provider( connection.token_credential, "https://cognitiveservices.azure.com/.default" ), @@ -281,9 +285,10 @@ async def get_default( populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. - :param connection_type: The connection type. Required. + :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + Optional. :type with_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. :rtype: ~azure.ai.projects.model.ConnectionProperties @@ -310,9 +315,10 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. - :param connection_name: Connection Name. Required. + :keyword connection_name: Connection Name. Required. :type connection_name: str - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + Optional. :type with_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. :rtype: ~azure.ai.projects.models.ConnectionProperties @@ -353,8 +359,8 @@ async def list( ) -> Sequence[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. - :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. - If not provided, all connections are listed. + :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this + type. If not provided, all connections are listed. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :return: A list of connection properties :rtype: Iterable[~azure.ai.projects.models._models.ConnectionProperties] @@ -382,16 +388,13 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) async def get_connection_string(self) -> str: - """ - Get the Application Insights connection string associated with the Project's Application Insights resource. - On first call, this method makes a service call to the Application Insights resource URL to get the connection string. - Subsequent calls return the cached connection string, if one exists. - Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Application Insights resource was not - enabled for this project. 
+ """Get the Application Insights connection string associated with the Project's + Application Insights resource. :return: The Application Insights connection string if a the resource was enabled for the Project. :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ResourceNotFoundError: Application Insights resource was not enabled + for this project. """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists @@ -619,21 +622,35 @@ async def create_agent( Creates a new agent with various configurations, delegating to the generated operations. :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + :type body: Union[JSON, IO[bytes]] + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new agent. + :paramtype name: Optional[str] + :keyword description: A description for the new agent. + :paramtype description: Optional[str] + :keyword instructions: System instructions for the agent. + :paramtype instructions: Optional[str] + :keyword tools: List of tools definitions for the agent. + :paramtype tools: Optional[List[_models.ToolDefinition]] + :keyword tool_resources: Resources used by the agent's tools. + :paramtype tool_resources: Optional[_models.ToolResources] + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. + :paramtype toolset: Optional[_models.AsyncToolSet] + :keyword temperature: Sampling temperature for generating agent responses. + :paramtype temperature: Optional[float] + :keyword top_p: Nucleus sampling parameter. + :paramtype top_p: Optional[float] + :keyword response_format: Response format for tool calls. + :paramtype response_format: Optional["_types.AgentsApiResponseFormatOption"] + :keyword metadata: Key/value pairs for storing additional information. + :paramtype metadata: Optional[Dict[str, str]] + :keyword content_type: Content type of the body. + :paramtype content_type: str + :keyword kwargs: Additional parameters. :return: An Agent object. + :rtype: _models.Agent :raises: HttpResponseError for HTTP errors. """ if body is not _Unset: @@ -1463,7 +1480,9 @@ async def create_stream( async def create_stream( self, thread_id: str, body: Union[JSON, IO[bytes]], *, content_type: str = "application/json", **kwargs: Any ) -> _models.AsyncAgentRunStream: - """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + """Creates a new run for an agent thread. 
+ + Terminating when the Run enters a terminal state with a `data: [DONE]` message. :param thread_id: Required. :type thread_id: str @@ -1500,7 +1519,9 @@ async def create_stream( event_handler: Optional[_models.AsyncAgentEventHandler] = None, **kwargs: Any, ) -> _models.AsyncAgentRunStream: - """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + """Creates a new run for an agent thread. + + Terminating when the Run enters a terminal state with a `data: [DONE]` message. :param thread_id: Required. :type thread_id: str @@ -1712,8 +1733,7 @@ async def submit_tool_outputs_to_run( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :param event_handler: The event handler to use for processing events during the run. - :param kwargs: Additional parameters. + :keyword event_handler: The event handler to use for processing events during the run. :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -1836,8 +1856,7 @@ async def submit_tool_outputs_to_stream( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :param event_handler: The event handler to use for processing events during the run. - :param kwargs: Additional parameters. + :keyword event_handler: The event handler to use for processing events during the run. :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.projects.models.AsyncAgentRunStream :raises ~azure.core.exceptions.HttpResponseError: @@ -1922,7 +1941,7 @@ async def upload_file( ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param file_path: Required. + :keyword file_path: Required. :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. @@ -1947,13 +1966,18 @@ async def upload_file( Uploads a file for use by other operations, delegating to the generated operations. :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. - :param kwargs: Additional parameters. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. 
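A short usage sketch for the `file_path` and `purpose` keywords documented above, in this async client's idiom (the client object and the local file path are assumptions, not part of the diff):

    async def upload_example(project_client) -> None:
        # file_path and purpose match the keywords documented above;
        # "assistants" is one of the listed known values.
        uploaded = await project_client.agents.upload_file(
            file_path="./product_info.md",  # hypothetical local file
            purpose="assistants",
        )
        print(f"Uploaded file, file ID: {uploaded.id}")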
@@ -1989,7 +2013,7 @@ async def upload_file( async def upload_file_and_poll(self, *, body: JSON, sleep_interval: float = 1, **kwargs: Any) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param body: Required. + :keyword body: Required. :type body: JSON :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. @@ -2032,7 +2056,7 @@ async def upload_file_and_poll( ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param file_path: Required. + :keyword file_path: Required. :type file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. @@ -2061,16 +2085,21 @@ async def upload_file_and_poll( Uploads a file for use by other operations, delegating to the generated operations. :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float - :param kwargs: Additional parameters. :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 4800f5dd2ce0..d44308205a56 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -74,6 +74,7 @@ StreamEventData = Union[MessageDeltaChunk, ThreadMessage, ThreadRun, RunStep, None] + def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[str, Any]: """ Remove the parameters, non present in class public fields; return shallow copy of a dictionary. @@ -81,9 +82,11 @@ def _filter_parameters(model_class: Type, parameters: Dict[str, Any]) -> Dict[st **Note:** Classes inherited from the model check that the parameters are present in the list of attributes and if they are not, the error is being raised. This check may not be relevant for classes, not inherited from azure.ai.projects._model_base.Model. - :param model_class: The class of model to be used. + :param Type model_class: The class of model to be used. :param parameters: The parsed dictionary with parameters. + :type parameters: Union[str, Dict[str, Any]] :return: The dictionary with all invalid parameters removed. 
+ :rtype: Dict[str, Any] """ new_params = {} valid_parameters = set( @@ -100,9 +103,11 @@ def _safe_instantiate(model_class: Type, parameters: Union[str, Dict[str, Any]]) """ Instantiate class with the set of parameters from the server. - :param model_class: The class of model to be used. + :param Type model_class: The class of model to be used. :param parameters: The parsed dictionary with parameters. + :type parameters: Union[str, Dict[str, Any]] :return: The class of model_class type if parameters is a dictionary, or the parameters themselves otherwise. + :rtype: Any """ if not isinstance(parameters, dict): return parameters @@ -227,7 +232,9 @@ def _refresh_token(self) -> None: project_client = AIProjectClient( credential=self._credential, - endpoint="not-needed", # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. http://management.azure.com is hard coded in the SDK. + # Since we are only going to use the "connections" operations, we don't need to supply an endpoint. + # http://management.azure.com is hard coded in the SDK. + endpoint="not-needed", subscription_id=self._subscription_id, resource_group_name=self._resource_group_name, project_name=self._project_name, @@ -346,7 +353,7 @@ def execute(self, tool_call: Any) -> Any: """ Execute the tool with the provided tool call. - :param tool_call: The tool call to execute. + :param Any tool_call: The tool call to execute. :return: The output of the tool operations. """ @@ -451,6 +458,7 @@ def definitions(self) -> List[ToolDefinition]: Get the function definitions. :return: A list of function definitions. + :rtype: List[ToolDefinition] """ return cast(List[ToolDefinition], self._definitions) @@ -460,6 +468,7 @@ def resources(self) -> ToolResources: Get the tool resources for the agent. :return: An empty ToolResources as FunctionTool doesn't have specific resources. + :rtype: ToolResources """ return ToolResources() @@ -474,7 +483,8 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any: except TypeError as e: error_message = f"Error executing function '{tool_call.function.name}': {e}" logging.error(error_message) - # Return error message as JSON string back to agent in order to make possible self correction to the function call + # Return error message as JSON string back to agent in order to make possible self + # correction to the function call return json.dumps({"error": error_message}) @@ -490,7 +500,8 @@ async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: except TypeError as e: error_message = f"Error executing function '{tool_call.function.name}': {e}" logging.error(error_message) - # Return error message as JSON string back to agent in order to make possible self correction to the function call + # Return error message as JSON string back to agent in order to make possible self correction + # to the function call return json.dumps({"error": error_message}) @@ -505,6 +516,9 @@ def __init__(self): def add_index(self, index: str, name: str): """ Add an index ID to the list of indices used to search. + + :param str index: The index connection id. + :param str name: The index name. """ # TODO self.index_list.append(IndexResource(index_connection_id=index, index_name=name)) @@ -513,6 +527,8 @@ def add_index(self, index: str, name: str): def definitions(self) -> List[ToolDefinition]: """ Get the Azure AI search tool definitions. 
+ + :rtype: List[ToolDefinition] """ return [AzureAISearchToolDefinition()] @@ -520,6 +536,8 @@ def definitions(self) -> List[ToolDefinition]: def resources(self) -> ToolResources: """ Get the Azure AI search resources. + + :rtype: ToolResources """ return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list)) @@ -545,6 +563,8 @@ def __init__(self, connection_id: str): def resources(self) -> ToolResources: """ Get the connection tool resources. + + :rtype: ToolResources """ return ToolResources() @@ -561,6 +581,8 @@ class BingGroundingTool(ConnectionTool): def definitions(self) -> List[ToolDefinition]: """ Get the Bing grounding tool definitions. + + :rtype: List[ToolDefinition] """ return [BingGroundingToolDefinition(bing_grounding=ToolConnectionList(connection_list=self.connection_ids))] @@ -574,6 +596,8 @@ class SharepointTool(ConnectionTool): def definitions(self) -> List[ToolDefinition]: """ Get the Sharepoint tool definitions. + + :rtype: List[ToolDefinition] """ return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))] @@ -627,6 +651,8 @@ def remove_vector_store(self, store_id: str) -> None: def definitions(self) -> List[ToolDefinition]: """ Get the file search tool definitions. + + :rtype: List[ToolDefinition] """ return [FileSearchToolDefinition()] @@ -634,6 +660,8 @@ def definitions(self) -> List[ToolDefinition]: def resources(self) -> ToolResources: """ Get the file search resources. + + :rtype: ToolResources """ return ToolResources(file_search=FileSearchToolResource(vector_store_ids=list(self.vector_store_ids))) @@ -677,6 +705,8 @@ def remove_file(self, file_id: str) -> None: def definitions(self) -> List[ToolDefinition]: """ Get the code interpreter tool definitions. + + :rtype: List[ToolDefinition] """ return [CodeInterpreterToolDefinition()] @@ -684,6 +714,8 @@ def definitions(self) -> List[ToolDefinition]: def resources(self) -> ToolResources: """ Get the code interpreter resources. + + :rtype: ToolResources """ if not self.file_ids: return ToolResources() @@ -708,7 +740,7 @@ def add(self, tool: Tool): """ Add a tool to the tool set. - :param tool: The tool to add. + :param Tool tool: The tool to add. :raises ValueError: If a tool of the same type already exists. """ self.validate_tool_type(tool) @@ -721,7 +753,7 @@ def remove(self, tool_type: Type[Tool]) -> None: """ Remove a tool of the specified type from the tool set. - :param tool_type: The type of tool to remove. + :param Type[Tool] tool_type: The type of tool to remove. :raises ValueError: If a tool of the specified type is not found. """ for i, tool in enumerate(self._tools): @@ -735,6 +767,8 @@ def remove(self, tool_type: Type[Tool]) -> None: def definitions(self) -> List[ToolDefinition]: """ Get the definitions for all tools in the tool set. + + :rtype: List[ToolDefinition] """ tools = [] for tool in self._tools: @@ -745,6 +779,8 @@ def definitions(self) -> List[ToolDefinition]: def resources(self) -> ToolResources: """ Get the resources for all tools in the tool set. + + :rtype: ToolResources """ tool_resources: Dict[str, Any] = {} for tool in self._tools: @@ -760,6 +796,12 @@ def resources(self) -> ToolResources: def _create_tool_resources_from_dict(self, resources: Dict[str, Any]) -> ToolResources: """ Safely converts a dictionary into a ToolResources instance. + + :param resources: A dictionary of tool resources. 
Should be a mapping + accepted by ~azure.ai.projects.models.AzureAISearchResource + :type resources: Dict[str, Any] + :return: A ToolResources instance. + :rtype: ToolResources """ try: return ToolResources(**resources) @@ -772,6 +814,7 @@ def get_definitions_and_resources(self) -> Dict[str, Any]: Get the definitions and resources for all tools in the tool set. :return: A dictionary containing the tool resources and definitions. + :rtype: Dict[str, Any] """ return { "tool_resources": self.resources, @@ -782,8 +825,9 @@ def get_tool(self, tool_type: Type[Tool]) -> Tool: """ Get a tool of the specified type from the tool set. - :param tool_type: The type of tool to get. + :param Type[Tool] tool_type: The type of tool to get. :return: The tool of the specified type. + :rtype: Tool :raises ValueError: If a tool of the specified type is not found. """ for tool in self._tools: @@ -801,20 +845,22 @@ def validate_tool_type(self, tool: Tool) -> None: """ Validate the type of the tool. - :param tool_type: The type of the tool to validate. + :param Tool tool: The type of the tool to validate. :raises ValueError: If the tool type is not a subclass of Tool. """ if isinstance(tool, AsyncFunctionTool): raise ValueError( - "AsyncFunctionTool is not supported in ToolSet. To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." + "AsyncFunctionTool is not supported in ToolSet. " + + "To use async functions, use AsyncToolSet and agents operations in azure.ai.projects.aio." ) def execute_tool_calls(self, tool_calls: List[Any]) -> Any: """ Execute a tool of the specified type with the provided tool calls. - :param tool_calls: A list of tool calls to execute. + :param List[Any] tool_calls: A list of tool calls to execute. :return: The output of the tool operations. + :rtype: Any """ tool_outputs = [] @@ -843,20 +889,22 @@ def validate_tool_type(self, tool: Tool) -> None: """ Validate the type of the tool. - :param tool_type: The type of the tool to validate. + :param Tool tool: The type of the tool to validate. :raises ValueError: If the tool type is not a subclass of Tool. """ if isinstance(tool, FunctionTool): raise ValueError( - "FunctionTool is not supported in AsyncToolSet. Please use AsyncFunctionTool instead and provide sync and/or async function(s)." + "FunctionTool is not supported in AsyncToolSet. " + + "Please use AsyncFunctionTool instead and provide sync and/or async function(s)." ) async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: """ Execute a tool of the specified type with the provided tool calls. - :param tool_calls: A list of tool calls to execute. + :param List[Any] tool_calls: A list of tool calls to execute. :return: The output of the tool operations. + :rtype: Any """ tool_outputs = [] @@ -879,55 +927,99 @@ async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: class AgentEventHandler: def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. + """ def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" + """Handle thread message events. + + :param ThreadMessage message: The thread message. + """ def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" + """Handle thread run events. + + :param ThreadRun run: The thread run. 
+ """ def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" + """Handle run step events. + + :param RunStep step: The run step. + """ def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" + """Handle run step delta events. + + :param RunStepDeltaChunk delta: The run step delta. + """ def on_error(self, data: str) -> None: - """Handle error events.""" + """Handle error events. + + :param str data: The error event's data. + """ def on_done(self) -> None: """Handle the completion of the stream.""" def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. + """ class AsyncAgentEventHandler: async def on_message_delta(self, delta: "MessageDeltaChunk") -> None: - """Handle message delta events.""" + """Handle message delta events. + + :param MessageDeltaChunk delta: The message delta. + """ async def on_thread_message(self, message: "ThreadMessage") -> None: - """Handle thread message events.""" + """Handle thread message events. + + :param ThreadMessage message: The thread message. + """ async def on_thread_run(self, run: "ThreadRun") -> None: - """Handle thread run events.""" + """Handle thread run events. + + :param ThreadRun run: The thread run. + """ async def on_run_step(self, step: "RunStep") -> None: - """Handle run step events.""" + """Handle run step events. + + :param RunStep step: The run step. + """ async def on_run_step_delta(self, delta: "RunStepDeltaChunk") -> None: - """Handle run step delta events.""" + """Handle run step delta events. + + :param RunStepDeltaChunk delta: The run step delta. + """ async def on_error(self, data: str) -> None: - """Handle error events.""" + """Handle error events. + + :param str data: The error event's data. + """ async def on_done(self) -> None: """Handle the completion of the stream.""" async def on_unhandled_event(self, event_type: str, event_data: Any) -> None: - """Handle any unhandled event types.""" + """Handle any unhandled event types. + + :param str event_type: The event type. + :param Any event_data: The event's data. + """ class AsyncAgentRunStream(AsyncIterator[Tuple[str, Union[str, StreamEventData]]]): @@ -1246,12 +1338,19 @@ def __init__(self, pageable_list: OpenAIPageableListOfThreadMessage): @property def messages(self) -> List[ThreadMessage]: - """Returns all messages in the messages.""" + """Returns all messages in the messages. + + + :rtype: List[ThreadMessage] + """ return self._messages @property def text_messages(self) -> List[MessageTextContent]: - """Returns all text message contents in the messages.""" + """Returns all text message contents in the messages. + + :rtype: List[MessageTextContent] + """ texts = [ content for msg in self._messages for content in msg.content if isinstance(content, MessageTextContent) ] @@ -1259,14 +1358,20 @@ def text_messages(self) -> List[MessageTextContent]: @property def image_contents(self) -> List[MessageImageFileContent]: - """Returns all image file contents from image message contents in the messages.""" + """Returns all image file contents from image message contents in the messages. 
+ + :rtype: List[MessageImageFileContent] + """ return [ content for msg in self._messages for content in msg.content if isinstance(content, MessageImageFileContent) ] @property def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: - """Returns all file citation annotations from text message annotations in the messages.""" + """Returns all file citation annotations from text message annotations in the messages. + + :rtype: List[MessageTextFileCitationAnnotation] + """ annotations = [ annotation for msg in self._messages @@ -1279,7 +1384,10 @@ def file_citation_annotations(self) -> List[MessageTextFileCitationAnnotation]: @property def file_path_annotations(self) -> List[MessageTextFilePathAnnotation]: - """Returns all file path annotations from text message annotations in the messages.""" + """Returns all file path annotations from text message annotations in the messages. + + :rtype: List[MessageTextFilePathAnnotation] + """ annotations = [ annotation for msg in self._messages diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 5d5781a73bcf..d57e1eaff98d 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -105,7 +105,8 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": if connection.authentication_type == AuthenticationType.API_KEY: logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using API key authentication" + "[InferenceOperations.get_chat_completions_client] " + + "Creating ChatCompletionsClient using API key authentication" ) from azure.core.credentials import AzureKeyCredential @@ -113,12 +114,14 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": elif connection.authentication_type == AuthenticationType.ENTRA_ID: # MaaS models do not yet support EntraID auth logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using Entra ID authentication" + "[InferenceOperations.get_chat_completions_client] " + + "Creating ChatCompletionsClient using Entra ID authentication" ) client = ChatCompletionsClient(endpoint=endpoint, credential=connection.properties.token_credential) elif connection.authentication_type == AuthenticationType.SAS: logger.debug( - "[InferenceOperations.get_chat_completions_client] Creating ChatCompletionsClient using SAS authentication" + "[InferenceOperations.get_chat_completions_client] " + + "Creating ChatCompletionsClient using SAS authentication" ) raise ValueError( "Getting chat completions client from a connection with SAS authentication is not yet supported" @@ -250,7 +253,7 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs auth = "Creating AzureOpenAI using SAS authentication" logger.debug("[InferenceOperations.get_azure_openai_client] %s", auth) client = AzureOpenAI( - # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider + # See https://learn.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python#azure-identity-get-bearer-token-provider # pylint: disable=line-too-long azure_ad_token_provider=get_bearer_token_provider( connection.token_credential, "https://cognitiveservices.azure.com/.default" ), @@ -273,9 +276,10 @@ def get_default( populating authentication 
credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. - :param connection_type: The connection type. Required. + :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + Optional. :type with_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. :rtype: ~azure.ai.projects.models.ConnectionProperties @@ -302,9 +306,10 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. - :param connection_name: Connection Name. Required. + :keyword connection_name: Connection Name. Required. :type connection_name: str - :param with_credentials: Whether to populate the connection properties with authentication credentials. Optional. + :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + Optional. :type with_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. :rtype: ~azure.ai.projects.models.ConnectionProperties @@ -345,8 +350,8 @@ def list( ) -> Sequence[ConnectionProperties]: """List the properties of all connections, or all connections of a certain connection type. - :param connection_type: The connection type. Optional. If provided, this method lists connections of this type. - If not provided, all connections are listed. + :keyword connection_type: The connection type. Optional. If provided, this method lists connections of this + type. If not provided, all connections are listed. :type connection_type: ~azure.ai.projects.models._models.ConnectionType :return: A list of connection properties :rtype: Sequence[~azure.ai.projects.models._models.ConnectionProperties] @@ -374,13 +379,14 @@ def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter # type: ignore except ModuleNotFoundError as e: raise ModuleNotFoundError( - "OpenTelemetry OTLP exporter is not installed. Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" + "OpenTelemetry OTLP exporter is not installed. 
" + + "Please install it using 'pip install opentelemetry-exporter-otlp-proto-grpc'" ) from e return OTLPSpanExporter(endpoint=destination) if isinstance(destination, io.TextIOWrapper): if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long try: from opentelemetry.sdk.trace.export import ConsoleSpanExporter except ModuleNotFoundError as e: @@ -394,6 +400,7 @@ def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: return None + def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: if isinstance(destination, str): # `destination` is the OTLP endpoint @@ -406,31 +413,29 @@ def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: except Exception as ex: # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. - logger.warning( - "Failed to configure OpenTelemetry logging.", exc_info=ex - ) + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) return None return OTLPLogExporter(endpoint=destination) if isinstance(destination, io.TextIOWrapper): if destination is sys.stdout: - # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter + # See: https://opentelemetry-python.readthedocs.io/en/latest/sdk/trace.export.html#opentelemetry.sdk.trace.export.ConsoleSpanExporter # pylint: disable=line-too-long try: from opentelemetry.sdk._logs.export import ConsoleLogExporter + return ConsoleLogExporter() except ModuleNotFoundError as ex: # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. - logger.warning( - "Failed to configure OpenTelemetry logging.", exc_info=ex - ) + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) return None else: raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") return None + def _configure_tracing(span_exporter: Any) -> None: if span_exporter is None: return @@ -456,6 +461,7 @@ def _configure_tracing(span_exporter: Any) -> None: provider = cast(TracerProvider, trace.get_tracer_provider()) provider.add_span_processor(SimpleSpanProcessor(span_exporter)) + def _configure_logging(log_exporter: Any) -> None: if log_exporter is None: return @@ -483,18 +489,17 @@ def _configure_logging(log_exporter: Any) -> None: except Exception as ex: # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. - logger.warning( - "Failed to configure OpenTelemetry logging.", exc_info=ex - ) + logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) + def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. - :keyword destination: `sys.stdout` to print telemetry to console or a string holding the + :param destination: `sys.stdout` to print telemetry to console or a string holding the OpenTelemetry protocol (OTLP) endpoint. If not provided, this method enables instrumentation, but does not configure OpenTelemetry SDK to export traces and logs. 
- :paramtype destination: Union[TextIO, str, None] + :type destination: Union[TextIO, str, None] """ span_exporter = _get_trace_exporter(destination) _configure_tracing(span_exporter) @@ -509,7 +514,8 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: settings.tracing_implementation = "opentelemetry" except ModuleNotFoundError: logger.warning( - "Azure SDK tracing plugin is not installed. Please install it using 'pip install azure-core-tracing-opentelemetry'" + "Azure SDK tracing plugin is not installed. " + + "Please install it using 'pip install azure-core-tracing-opentelemetry'" ) try: @@ -538,7 +544,8 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: OpenAIInstrumentor().instrument() except ModuleNotFoundError: logger.warning( - "Could not call `OpenAIInstrumentor().instrument()` since `opentelemetry-instrumentation-openai-v2` is not installed" + "Could not call `OpenAIInstrumentor().instrument()` since " + + "`opentelemetry-instrumentation-openai-v2` is not installed" ) try: @@ -547,7 +554,8 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: LangchainInstrumentor().instrument() except ModuleNotFoundError: logger.warning( - "Could not call LangchainInstrumentor().instrument()` since `opentelemetry-instrumentation-langchain` is not installed" + "Could not call `LangchainInstrumentor().instrument()` since " + + "`opentelemetry-instrumentation-langchain` is not installed" ) @@ -560,16 +568,12 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def get_connection_string(self) -> str: - """ - Get the Application Insights connection string associated with the Project's Application Insights resource. - On first call, this method makes a service call to the Application Insights resource URL to get the connection string. - Subsequent calls return the cached connection string, if one exists. - Raises ~azure.core.exceptions.ResourceNotFoundError exception if an Application Insights resource was not - enabled for this project. + """Get the Application Insights connection string associated with the Project's Application Insights resource. :return: The Application Insights connection string if the resource was enabled for the Project. :rtype: str - :raises ~azure.core.exceptions.ResourceNotFoundError: + :raises ~azure.core.exceptions.ResourceNotFoundError: An Application Insights resource was not + enabled for this project. """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists
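For context, a call to this operation might look like the following sketch. The `telemetry` accessor name matches the `project_client.telemetry.enable(...)` calls in the tests later in this series, and `project_client` is assumed to be an existing `AIProjectClient`; this block is editorial and not part of the patch.

```python
from azure.core.exceptions import ResourceNotFoundError

try:
    # Assumed accessor name, mirroring the telemetry tests later in this series.
    connection_string = project_client.telemetry.get_connection_string()
    print(f"Application Insights connection string: {connection_string}")
except ResourceNotFoundError:
    # Raised when no Application Insights resource is enabled for the project.
    print("Application Insights is not enabled for this project.")
```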
@@ -794,21 +798,34 @@ def create_agent( Creates a new agent with various configurations, delegating to the generated operations. :param body: JSON or IO[bytes]. Required if `model` is not provided. - :param model: The ID of the model to use. Required if `body` is not provided. - :param name: The name of the new agent. - :param description: A description for the new agent. - :param instructions: System instructions for the agent. - :param tools: List of tools definitions for the agent. - :param tool_resources: Resources used by the agent's tools. - :param toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` + :type body: Union[JSON, IO[bytes]] + :keyword model: The ID of the model to use. Required if `body` is not provided. + :paramtype model: str + :keyword name: The name of the new agent. + :paramtype name: Optional[str] + :keyword description: A description for the new agent. + :paramtype description: Optional[str] + :keyword instructions: System instructions for the agent. + :paramtype instructions: Optional[str] + :keyword tools: List of tools definitions for the agent. + :paramtype tools: Optional[List[_models.ToolDefinition]] + :keyword tool_resources: Resources used by the agent's tools. + :paramtype tool_resources: Optional[_models.ToolResources] + :keyword toolset: Collection of tools and resources (alternative to `tools` and `tool_resources` and adds automatic execution logic for functions). - :param temperature: Sampling temperature for generating agent responses. - :param top_p: Nucleus sampling parameter. - :param response_format: Response format for tool calls. - :param metadata: Key/value pairs for storing additional information. - :param content_type: Content type of the body. - :param kwargs: Additional parameters. + :paramtype toolset: Optional[_models.ToolSet] + :keyword temperature: Sampling temperature for generating agent responses. + :paramtype temperature: Optional[float] + :keyword top_p: Nucleus sampling parameter. + :paramtype top_p: Optional[float] + :keyword response_format: Response format for tool calls. + :paramtype response_format: Optional["_types.AgentsApiResponseFormatOption"] + :keyword metadata: Key/value pairs for storing additional information. + :paramtype metadata: Optional[Dict[str, str]] + :keyword content_type: Content type of the body. + :paramtype content_type: str :return: An Agent object. + :rtype: _models.Agent :raises: HttpResponseError for HTTP errors. @@ -1541,7 +1558,9 @@ def create_and_process_run( def create_stream( self, thread_id: str, body: JSON, *, content_type: str = "application/json", **kwargs: Any ) -> _models.AgentRunStream: - """Creates a new stream for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + """Creates a new stream for an agent thread. + + The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. :param thread_id: Required. :type thread_id: str @@ -1659,7 +1678,9 @@ def create_stream( def create_stream( self, thread_id: str, body: IO[bytes], *, content_type: str = "application/json", **kwargs: Any ) -> _models.AgentRunStream: - """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + """Creates a new stream for an agent thread. + + The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. :param thread_id: Required. :type thread_id: str @@ -1696,7 +1717,9 @@ def create_stream( event_handler: Optional[_models.AgentEventHandler] = None, **kwargs: Any, ) -> _models.AgentRunStream: - """Creates a new run for an agent thread. terminating when the Run enters a terminal state with a ``data: [DONE]`` message. + """Creates a new stream for an agent thread. + + The stream terminates when the Run enters a terminal state with a ``data: [DONE]`` message. :param thread_id: Required. :type thread_id: str
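A hedged sketch of consuming such a stream follows. It assumes `project_client`, `agent`, and `thread` from the surrounding examples, and the `assistant_id` keyword, `AgentEventHandler` hook names, and `until_done()` helper follow the preview samples; treat those names as assumptions rather than part of this patch.

```python
from azure.ai.projects.models import AgentEventHandler

class MyEventHandler(AgentEventHandler):
    # Hook names are illustrative; override only the callbacks your scenario needs.
    def on_thread_run(self, run) -> None:
        print(f"ThreadRun status: {run.status}")

    def on_error(self, data: str) -> None:
        print(f"Stream error: {data}")

# Assumed: `project_client`, `agent`, and `thread` already exist (see the snippets above).
with project_client.agents.create_stream(
    thread_id=thread.id, assistant_id=agent.id, event_handler=MyEventHandler()
) as stream:
    stream.until_done()  # consume events until the terminal ``data: [DONE]`` message
```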
@@ -1908,8 +1931,7 @@ def submit_tool_outputs_to_run( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :param event_handler: The event handler to use for processing events during the run. - :param kwargs: Additional parameters. + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: Optional[~azure.ai.projects.models.AgentEventHandler] :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2032,8 +2054,7 @@ def submit_tool_outputs_to_stream( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :param event_handler: The event handler to use for processing events during the run. - :param kwargs: Additional parameters. + :keyword event_handler: The event handler to use for processing events during the run. + :paramtype event_handler: Optional[~azure.ai.projects.models.AgentEventHandler] :return: AgentRunStream. AgentRunStream is compatible with Iterable and supports streaming. :rtype: ~azure.ai.projects.models.AgentRunStream :raises ~azure.core.exceptions.HttpResponseError: @@ -2118,7 +2139,7 @@ def upload_file( ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param file_path: Required. - :type file_path: str + :keyword file_path: Required. + :paramtype file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. @@ -2143,13 +2164,18 @@ def upload_file( Uploads a file for use by other operations, delegating to the generated operations. :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. + :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. + :paramtype purpose: Union[str, _models.FilePurpose, None] - :param filename: The name of the file. - :param kwargs: Additional parameters. + :keyword filename: The name of the file. + :paramtype filename: Optional[str] :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. @@ -2228,7 +2254,7 @@ def upload_file_and_poll( ) -> _models.OpenAIFile: """Uploads a file for use by other operations. - :param file_path: Required. - :type file_path: str + :keyword file_path: Required. + :paramtype file_path: str :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required. @@ -2257,16 +2283,21 @@ def upload_file_and_poll( Uploads a file for use by other operations, delegating to the generated operations. :param body: JSON. Required if `file` and `purpose` are not provided. - :param file: File content. Required if `body` and `purpose` are not provided. - :param file_path: Path to the file. Required if `body` and `purpose` are not provided. - :param purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", + :type body: Optional[JSON] + :keyword file: File content. Required if `body` and `purpose` are not provided. + :paramtype file: Optional[FileType] + :keyword file_path: Path to the file. Required if `body` and `purpose` are not provided. 
+ :paramtype file_path: Optional[str] + :keyword purpose: Known values are: "fine-tune", "fine-tune-results", "assistants", "assistants_output", "batch", "batch_output", and "vision". Required if `body` and `file` are not provided. - :param filename: The name of the file. + :paramtype purpose: Union[str, _models.FilePurpose, None] + :keyword filename: The name of the file. + :paramtype filename: Optional[str] :keyword sleep_interval: Time to wait before polling for the status of the uploaded file. Default value is 1. :paramtype sleep_interval: float - :param kwargs: Additional parameters. :return: OpenAIFile. The OpenAIFile is compatible with MutableMapping + :rtype: _models.OpenAIFile :raises FileNotFoundError: If the file_path is invalid. :raises IOError: If there are issues with reading the file. :raises: HttpResponseError for HTTP errors. @@ -2653,6 +2684,7 @@ def save_file(self, file_id: str, file_name: str, target_dir: Optional[Union[str :param file_name: The name of the file to be saved. :type file_name: str :param target_dir: The directory where the file should be saved. Defaults to the current working directory. + :type target_dir: Optional[Union[str, Path]] :raises ValueError: If the target path is not a directory or the file name is invalid. :raises RuntimeError: If file content retrieval fails or no content is found. :raises TypeError: If retrieved chunks are not bytes-like objects. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 01d37b7e1912..8018ad00fd33 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -210,10 +210,10 @@ def is_instrumented(self): def set_enable_content_recording(self, enable_content_recording: bool = False) -> None: """This function sets the content recording value. - :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + :param enable_content_recording: Indicates whether tracing of message content should be enabled. This also controls whether function call tool function names, parameter names and parameter values are traced. - :type enable_content_tracing: bool + :type enable_content_recording: bool """ self._set_enable_content_recording(enable_content_recording=enable_content_recording) @@ -432,6 +432,7 @@ def agent_api_response_to_str(response_format: Any) -> Optional[str]: :param response_format: The response format. :type response_format: ~azure.ai.projects._types.AgentsApiResponseFormatOption :returns: string for the response_format. + :rtype: Optional[str] :raises: Value error if response_format is not of type AgentsApiResponseFormatOption. """ if isinstance(response_format, str) or response_format is None: @@ -1557,10 +1558,10 @@ def _is_instrumented(self): def _set_enable_content_recording(self, enable_content_recording: bool = False) -> None: """This function sets the content recording value. - :param enable_content_tracing: Indicates whether tracing of message content should be enabled. + :param enable_content_recording: Indicates whether tracing of message content should be enabled. This also controls whether function call tool function names, parameter names and parameter values are traced. 
- :type enable_content_tracing: bool + :type enable_content_recording: bool """ global _trace_agents_content _trace_agents_content = enable_content_recording @@ -1644,7 +1645,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): if self.last_run and self.last_run.last_error: self.span.set_status( StatusCode.ERROR, # pyright: ignore [reportPossiblyUnboundVariable] - self.last_run.last_error.message + self.last_run.last_error.message, ) self.span.add_attribute(ERROR_TYPE, self.last_run.last_error.code) From 78d345dd19340cddc7b3444c4a88d64cb4a61037 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:35:17 -0800 Subject: [PATCH 112/138] Enable test recordings for tests\inference, tests\telemetry and tests\connection (#38475) --- .../azure_ai_projects_tests.env | 21 ++- sdk/ai/azure-ai-projects/dev_requirements.txt | 4 +- sdk/ai/azure-ai-projects/tests/conftest.py | 53 +++++-- .../tests/connections/connection_test_base.py | 4 +- .../tests/connections/test_connections.py | 136 +++++++---------- .../connections/test_connections_async.py | 138 ++++++------------ .../tests/inference/inference_test_base.py | 3 +- .../tests/inference/test_inference.py | 32 ++-- .../tests/inference/test_inference_async.py | 31 ++-- .../tests/telemetry/telemetry_test_base.py | 2 +- .../tests/telemetry/test_telemetry.py | 6 - .../tests/telemetry/test_telemetry_async.py | 6 - 12 files changed, 194 insertions(+), 242 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env index d000ead3a824..3280ed9d6826 100644 --- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env +++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env @@ -5,6 +5,7 @@ # Default to live tests without recordings: AZURE_TEST_RUN_LIVE=true AZURE_SKIP_LIVE_RECORDING=true +PROXY_URL=http://localhost:5000 ######################################################################################################################## # Connection tests @@ -17,10 +18,11 @@ AZURE_SKIP_LIVE_RECORDING=true # such that you set a connection name that is different than the default connection name. # AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING= -AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME= -AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AISERVICES_CONNECTION_NAME= -AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AOAI_CONNECTION_NAME=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME} -AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AISERVICES_CONNECTION_NAME=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AISERVICES_CONNECTION_NAME} +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AOAI_CONNECTION_NAME= +AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AISERVICES_CONNECTION_NAME= +# TODO: Define different defaults, once you have a proper AI project set up for testing +#AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME= +#AZURE_AI_PROJECTS_CONNECTIONS_TESTS_DEFAULT_AISERVICES_CONNECTION_NAME= ######################################################################################################################## @@ -32,7 +34,7 @@ AZURE_AI_PROJECTS_CONNECTIONS_TESTS_AISERVICES_CONNECTION_NAME=${AZURE_AI_PROJEC # Populate the Azure OpenAI api-version and model deployment names below. 
# Note: See Azure OpenAI api-versions here: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs # -AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} +AZURE_AI_PROJECTS_INFERENCE_TESTS_PROJECT_CONNECTION_STRING= AZURE_AI_PROJECTS_INFERENCE_TESTS_AOAI_API_VERSION= AZURE_AI_PROJECTS_INFERENCE_TESTS_AOAI_MODEL_DEPLOYMENT_NAME= AZURE_AI_PROJECTS_INFERENCE_TESTS_AISERVICES_MODEL_DEPLOYMENT_NAME= @@ -41,7 +43,7 @@ AZURE_AI_PROJECTS_INFERENCE_TESTS_AISERVICES_MODEL_DEPLOYMENT_NAME= ######################################################################################################################## # Telemetry tests # -AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_CONNECTIONS_TESTS_PROJECT_CONNECTION_STRING} +AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING= @@ -50,7 +52,12 @@ AZURE_AI_PROJECTS_TELEMETRY_TESTS_PROJECT_CONNECTION_STRING=${AZURE_AI_PROJECTS_ ######################################################################################################################## -# Evaluation tests +# Evaluations tests +# +# To run evaluations tests you need an AI Studio project with +# - A default AIServices resource with at least one chat-completions model deployed (from OpenAI or non-OpenAI) +# - A default Azure OpenAI resource connected with at least one chat-completions OpenAI model deployed +# Populate the Azure OpenAI api-version and model deployment names below. # AZURE_AI_PROJECTS_EVALUATIONS_TESTS_PROJECT_CONNECTION_STRING= AZURE_AI_PROJECTS_EVALUATIONS_TESTS_DEFAULT_AOAI_CONNECTION_NAME= diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index 445b9b5aaf20..751bb54c16ea 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -2,4 +2,6 @@ ../../core/azure-core ../../identity/azure-identity ../../core/azure-core-tracing-opentelemetry -aiohttp \ No newline at end of file +aiohttp +azure-ai-inference +openai diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index 303dc2e6bd5e..563cc1239509 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -5,7 +5,13 @@ import os import pytest -from devtools_testutils import remove_batch_sanitizers, get_credential, test_proxy, add_general_regex_sanitizer +from devtools_testutils import ( + remove_batch_sanitizers, + get_credential, + test_proxy, + add_general_regex_sanitizer, + add_body_key_sanitizer, +) from dotenv import load_dotenv, find_dotenv from azure.ai.projects import AIProjectClient @@ -17,10 +23,10 @@ class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" RESOURCE_GROUP_NAME = "00000" WORKSPACE_NAME = "00000" - CONNECTION_NAME = "00000" DATASET_NAME = "00000" TENANT_ID = "00000000-0000-0000-0000-000000000000" USER_OBJECT_ID = "00000000-0000-0000-0000-000000000000" + API_KEY = "00000000000000000000000000000000000000000000000000000000000000000000" @pytest.fixture(scope="session") @@ -39,13 +45,6 @@ def mock_dataset_name(): } -@pytest.fixture(scope="session") -def mock_connection_name(): - return { - "connection_name": f"{SanitizedValues.CONNECTION_NAME}", - } - # autouse=True will trigger this fixture on each pytest run, even if it's not explicitly 
used by a test method @pytest.fixture(scope="session", autouse=True) def start_proxy(test_proxy): @@ -53,36 +52,58 @@ def start_proxy(test_proxy): @pytest.fixture(scope="session", autouse=True) -def add_sanitizers(test_proxy, mock_project_scope, mock_dataset_name, mock_connection_name): - # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: - # - AZSDK3493: $..name +def add_sanitizers(test_proxy, mock_project_scope, mock_dataset_name): def azure_workspace_triad_sanitizer(): """Sanitize subscription, resource group, and workspace.""" + add_general_regex_sanitizer( regex=r"/subscriptions/([-\w\._\(\)]+)", value=mock_project_scope["subscription_id"], group_for_replace="1", ) + add_general_regex_sanitizer( regex=r"/resource[gG]roups/([-\w\._\(\)]+)", value=mock_project_scope["resource_group_name"], group_for_replace="1", ) + add_general_regex_sanitizer( regex=r"/workspaces/([-\w\._\(\)]+)", value=mock_project_scope["project_name"], group_for_replace="1" ) + # TODO (Darren): Check why this is needed in addition to the above add_general_regex_sanitizer( - regex=r"/connections/([-\w\._\(\)]+)", value=mock_connection_name["connection_name"], group_for_replace="1" + regex=r"%2Fsubscriptions%2F([-\w\._\(\)]+)", + value=mock_project_scope["subscription_id"], + group_for_replace="1", ) + # TODO (Darren): Check why this is needed in addition to the above add_general_regex_sanitizer( - regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1" + regex=r"%2Fresource[gG]roups%2F([-\w\._\(\)]+)", + value=mock_project_scope["resource_group_name"], + group_for_replace="1", ) - add_general_regex_sanitizer(regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1") - azure_workspace_triad_sanitizer() + add_general_regex_sanitizer(regex=r"/runs/([-\w\._\(\)]+)", value="Sanitized", group_for_replace="1") + + add_general_regex_sanitizer( + regex=r"/data/([-\w\._\(\)]+)", value=mock_dataset_name["dataset_name"], group_for_replace="1" + ) + + # Sanitize Application Insights connection string from service response (/tests/telemetry) + add_body_key_sanitizer( + json_path="properties.ConnectionString", + value="InstrumentationKey=00000000-0000-0000-0000-000000000000;IngestionEndpoint=https://region.applicationinsights.azure.com/;LiveEndpoint=https://region.livediagnostics.monitor.azure.com/;ApplicationId=00000000-0000-0000-0000-000000000000", + ) + + # Sanitize API key from service response (/tests/connections) + add_body_key_sanitizer(json_path="properties.credentials.key", value="Sanitized") + + # Remove the following sanitizers since certain fields are needed in tests and are non-sensitive: + # - AZSDK3493: $..name remove_batch_sanitizers(["AZSDK3493"]) diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index dfe9f7024b87..c44ece6b70e0 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -15,9 +15,7 @@ servicePreparerConnectionsTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_connections_tests", - azure_ai_projects_connections_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", - azure_ai_projects_connections_tests_default_aoai_connection_name="default-aoai-connection-name", - 
azure_ai_projects_connections_tests_default_aiservices_connection_name="default-aiservices-connection-name", + azure_ai_projects_connections_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", azure_ai_projects_connections_tests_aoai_connection_name="aoai-connection-name", azure_ai_projects_connections_tests_aiservices_connection_name="aiservices-connection-name", ) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 10fd453fd4e0..9bc7d4f90e6e 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -22,8 +22,9 @@ def test_connections_get(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: for with_credentials in [True, False]: + try: - connection_properties = project_client.connections.get( + _ = project_client.connections.get( connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials, ) @@ -32,56 +33,41 @@ def test_connections_get(self, **kwargs): print(e) assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message - connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=False) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = project_client.connections.get(connection_name=aoai_connection, with_credentials=True) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = project_client.connections.get(connection_name=aiservices_connection, with_credentials=False) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=aiservices_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) - - connection = project_client.connections.get(connection_name=aiservices_connection, with_credentials=True) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=aiservices_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) + connection = project_client.connections.get( + connection_name=aoai_connection, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) + + connection = project_client.connections.get( + connection_name=aiservices_connection, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, + ) @servicePreparerConnectionsTests() @recorded_by_proxy def test_connections_get_default(self, **kwargs): - default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") - default_serverless_connection = kwargs.pop( - "azure_ai_projects_connections_tests_default_aiservices_connection_name" - ) + # TODO: Use default connection names + default_aoai_connection 
= kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") + default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_aiservices_connection_name") with self.get_sync_client(**kwargs) as project_client: for with_credentials in [True, False]: try: - connection_properties = project_client.connections.get_default( + _ = project_client.connections.get_default( connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials, ) @@ -90,49 +76,27 @@ def test_connections_get_default(self, **kwargs): print(e) assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message - connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=default_aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=default_aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=False - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) - - connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) + connection = project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) + + connection = project_client.connections.get_default( + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, + ) @servicePreparerConnectionsTests() @recorded_by_proxy @@ -150,7 +114,7 @@ def test_connections_list(self, **kwargs): connection_type=ConnectionType.AZURE_OPEN_AI, ) count_aoai = len(connections) - print("====> Listing of all Azure Open AI connections (found {count_aoai}):") + print(f"====> Listing of all Azure Open AI connections (found {count_aoai}):") for connection in connections: print(connection) ConnectionsTestBase.validate_connection(connection, False) @@ -159,7 +123,7 @@ def test_connections_list(self, **kwargs): connection_type=ConnectionType.AZURE_AI_SERVICES, ) count_serverless = len(connections) - print("====> Listing of all Serverless connections (found {count_serverless}):") + print(f"====> Listing of all Serverless connections (found {count_serverless}):") for connection in connections: 
print(connection) ConnectionsTestBase.validate_connection(connection, False) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index a2299ecc334d..0c7ba272af09 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -22,7 +22,7 @@ async def test_connections_get_async(self, **kwargs): for with_credentials in [True, False]: try: - connection_properties = await project_client.connections.get( + _ = await project_client.connections.get( connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, with_credentials=with_credentials, ) @@ -31,60 +31,40 @@ async def test_connections_get_async(self, **kwargs): print(e) assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message - connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=False) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = await project_client.connections.get(connection_name=aoai_connection, with_credentials=True) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = await project_client.connections.get( - connection_name=aiservices_connection, with_credentials=False - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=aiservices_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) - - connection = await project_client.connections.get( - connection_name=aiservices_connection, with_credentials=True - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=aiservices_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) + connection = await project_client.connections.get( + connection_name=aoai_connection, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) + + connection = await project_client.connections.get( + connection_name=aiservices_connection, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=aiservices_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, + ) @servicePreparerConnectionsTests() @recorded_by_proxy_async async def test_connections_get_default_async(self, **kwargs): - default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_default_aoai_connection_name") - default_serverless_connection = kwargs.pop( - "azure_ai_projects_connections_tests_default_aiservices_connection_name" - ) + default_aoai_connection = kwargs.pop("azure_ai_projects_connections_tests_aoai_connection_name") + default_serverless_connection = kwargs.pop("azure_ai_projects_connections_tests_aiservices_connection_name") async with self.get_async_client(**kwargs) as project_client: for with_credentials in [True, False]: try: - connection_properties = await 
project_client.connections.get_default( + _ = await project_client.connections.get_default( connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, with_credentials=with_credentials, ) @@ -93,49 +73,27 @@ async def test_connections_get_default_async(self, **kwargs): print(e) assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message - connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=False - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=default_aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=default_aoai_connection, - expected_connection_type=ConnectionType.AZURE_OPEN_AI, - ) - - connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=False - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - False, - expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) - - connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True - ) - print(connection) - ConnectionsTestBase.validate_connection( - connection, - True, - expected_connection_name=default_serverless_connection, - expected_connection_type=ConnectionType.AZURE_AI_SERVICES, - ) + connection = await project_client.connections.get_default( + connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=default_aoai_connection, + expected_connection_type=ConnectionType.AZURE_OPEN_AI, + ) + + connection = await project_client.connections.get_default( + connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=with_credentials + ) + print(connection) + ConnectionsTestBase.validate_connection( + connection, + with_credentials, + expected_connection_name=default_serverless_connection, + expected_connection_type=ConnectionType.AZURE_AI_SERVICES, + ) @servicePreparerConnectionsTests() @recorded_by_proxy_async @@ -153,7 +111,7 @@ async def test_connections_list_async(self, **kwargs): connection_type=ConnectionType.AZURE_OPEN_AI, ) count_aoai = len(connections) - print("====> Listing of all Azure Open AI connections (found {count_aoai}):") + print(f"====> Listing of all Azure Open AI connections (found {count_aoai}):") for connection in connections: print(connection) ConnectionsTestBase.validate_connection(connection, False) @@ -162,7 +120,7 @@ async def test_connections_list_async(self, **kwargs): connection_type=ConnectionType.AZURE_AI_SERVICES, ) count_serverless = len(connections) - print("====> Listing of all Serverless connections (found {count_serverless}):") + print(f"====> Listing of all Serverless connections (found {count_serverless}):") for connection in connections: print(connection) ConnectionsTestBase.validate_connection(connection, False) diff --git a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py index 
4c3f3687168f..380531719e9a 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/inference/inference_test_base.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ import sys +import os import logging import functools from azure.ai.projects import AIProjectClient @@ -12,7 +13,7 @@ servicePreparerInferenceTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_inference_tests", - azure_ai_projects_inference_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", + azure_ai_projects_inference_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", azure_ai_projects_inference_tests_aoai_api_version="aoai-api-version", azure_ai_projects_inference_tests_aoai_model_deployment_name="aoai-model-deployment-name", azure_ai_projects_inference_tests_aiservices_model_deployment_name="aoai-model-deployment-name", diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py index b6eb42474899..3d7dd807c558 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ import pprint -from devtools_testutils import recorded_by_proxy +from devtools_testutils import recorded_by_proxy, is_live_and_not_recording from inference_test_base import InferenceTestBase, servicePreparerInferenceTests from azure.ai.inference.models import SystemMessage, UserMessage @@ -19,18 +19,23 @@ def test_inference_get_azure_openai_client(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: # See API versions in https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#api-specs with project_client.inference.get_azure_openai_client(api_version=api_version) as azure_openai_client: - response = azure_openai_client.chat.completions.create( - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - } - ], - model=model, - ) - pprint.pprint(response) - contains = ["5280", "5,280"] - assert any(item in response.choices[0].message.content for item in contains) + if is_live_and_not_recording(): + response = azure_openai_client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + } + ], + model=model, + ) + print("\nAzureOpenAI response:") + pprint.pprint(response) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + else: + print("Skipped chat completions call with AOAI client, because it cannot be recorded.") + pass @servicePreparerInferenceTests() @recorded_by_proxy @@ -45,6 +50,7 @@ def test_inference_get_chat_completions_client(self, **kwargs): UserMessage(content="How many feet are in a mile?"), ], ) + print("\nChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) diff --git a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py index 75d386e0dc8e..510e7d8cabe0 100644 --- a/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py +++ b/sdk/ai/azure-ai-projects/tests/inference/test_inference_async.py @@ -4,6 +4,7 
@@ # ------------------------------------ import pprint from devtools_testutils.aio import recorded_by_proxy_async +from devtools_testutils import is_live_and_not_recording from inference_test_base import InferenceTestBase, servicePreparerInferenceTests from azure.ai.inference.models import SystemMessage, UserMessage @@ -21,18 +22,23 @@ async def test_inference_get_azure_openai_client_async(self, **kwargs): async with await project_client.inference.get_azure_openai_client( api_version=api_version ) as azure_openai_client: - response = await azure_openai_client.chat.completions.create( - messages=[ - { - "role": "user", - "content": "How many feet are in a mile?", - } - ], - model=model, - ) - pprint.pprint(response) - contains = ["5280", "5,280"] - assert any(item in response.choices[0].message.content for item in contains) + if is_live_and_not_recording(): + response = await azure_openai_client.chat.completions.create( + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + } + ], + model=model, + ) + print("\nAsyncAzureOpenAI response:") + pprint.pprint(response) + contains = ["5280", "5,280"] + assert any(item in response.choices[0].message.content for item in contains) + else: + print("Skipped chat completions call with AOAI client, because it cannot be recorded.") + pass @servicePreparerInferenceTests() @recorded_by_proxy_async @@ -47,6 +53,7 @@ async def test_inference_get_chat_completions_client_async(self, **kwargs): UserMessage(content="How many feet are in a mile?"), ], ) + print("\nAsync ChatCompletionsClient response:") pprint.pprint(response) contains = ["5280", "5,280"] assert any(item in response.choices[0].message.content for item in contains) diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py index 607a1aec85f9..07c09aa573d4 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/telemetry_test_base.py @@ -17,7 +17,7 @@ servicePreparerTelemetryTests = functools.partial( EnvironmentVariableLoader, "azure_ai_projects_telemetry_test", - azure_ai_projects_telemetry_tests_project_connection_string="azure-region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", + azure_ai_projects_telemetry_tests_project_connection_string="region.api.azureml.ms;00000000-0000-0000-0000-000000000000;rg-name;project-name", ) # Set to True to enable SDK logging diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py index a49d87f46469..23d7f81a5ee2 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry.py @@ -26,9 +26,3 @@ def test_telemetry_enable_console_tracing(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: project_client.telemetry.enable(destination=sys.stdout) # TODO: Create inference client and do chat completions. How do I know if traces were emitted? - - @servicePreparerTelemetryTests() - def test_telemetry_enable_otlp_tracing(self, **kwargs): - with self.get_sync_client(**kwargs) as project_client: - project_client.telemetry.enable(destination="https://some.otlp.collector.endpoint") - # TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. 
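The TODO in these telemetry tests asks how to tell whether traces were actually emitted. One possible approach, sketched here using only public `opentelemetry-sdk` APIs (this block is editorial, not part of the patch), is to register an in-memory span exporter and assert on the captured spans:

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

# Route all spans to an in-memory exporter so a test can inspect them afterwards.
exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))
trace.set_tracer_provider(provider)

# ... exercise the instrumented client here (e.g. a chat completions call) ...
with trace.get_tracer(__name__).start_as_current_span("probe"):
    pass

# Any emitted spans are now available for assertions.
spans = exporter.get_finished_spans()
assert any(span.name == "probe" for span in spans)
```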
diff --git a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py index adadf9dc224d..ec803e04ef97 100644 --- a/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py +++ b/sdk/ai/azure-ai-projects/tests/telemetry/test_telemetry_async.py @@ -25,9 +25,3 @@ async def test_telemetry_enable_console_tracing_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: project_client.telemetry.enable(destination=sys.stdout) # TODO: Create inference client and do chat completions. How do I know if traces were emitted? - - @servicePreparerTelemetryTests() - async def test_telemetry_enable_otlp_tracing(self, **kwargs): - async with self.get_async_client(**kwargs) as project_client: - project_client.telemetry.enable(destination="https://some.otlp.collector.endpoint") - # TODO: Create inference client and do chat completions. Test proxy will log attempt at telemetry call. From fc77caad4d3b2243813af0a034cefbbdd397850e Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:40:02 -0800 Subject: [PATCH 113/138] Update cspell.json --- .vscode/cspell.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 70752af4cc66..44f82c84ffab 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -1328,7 +1328,9 @@ "filename": "sdk/ai/azure-ai-projects/**", "words": [ "aiservices", - "OTEL" + "OTEL", + "GENAI", + "fspath" ] }, { From 90e27d4e72d1340676c8ce849af084f33b4d0ebd Mon Sep 17 00:00:00 2001 From: Howie Leung Date: Tue, 12 Nov 2024 10:54:21 -0800 Subject: [PATCH 114/138] Removed enterprise and add installation for tracing (#38483) * Removed enterprise and add installation for tracing * clean up * resolved comments --- sdk/ai/azure-ai-projects/README.md | 90 +++++------------- sdk/ai/azure-ai-projects/assets.json | Bin 0 -> 318 bytes ...nt-p8a1RfPaN1HMCT69X8YoEjlp_image_file.png | Bin 0 -> 176468 bytes ...nt-yH17pFqN8YkDalDYSHearWdv_image_file.png | Bin 0 -> 155048 bytes 4 files changed, 23 insertions(+), 67 deletions(-) create mode 100644 sdk/ai/azure-ai-projects/assets.json create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assistant-p8a1RfPaN1HMCT69X8YoEjlp_image_file.png create mode 100644 sdk/ai/azure-ai-projects/samples/agents/assistant-yH17pFqN8YkDalDYSHearWdv_image_file.png diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 4b25c24a575d..c0f46f0174a9 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -42,25 +42,23 @@ TODO ## Examples -### Agents +### Agents (Private Preview) Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with agents: +Agents are actively being developed. A sign-up form for private preview is coming soon. 
+ - Create project client - Create agent with: - File Search - Code interpreter - Bing grounding - Azure AI Search - Function call - - Create a thread with + - Create thread with - Tool resource - - Create a message with: + - Create message with: - File search attachment - Code interpreter attachment - Execute Run, Run_and_Process, or Stream - Retrieve message - Retrieve file @@ -217,40 +215,6 @@ agent = project_client.agents.create_agent( -#### Create Agent with File Search with File in Blob Store -The sections above demonstrated uploading files only for agents to perform file search and code interpreter. In some use case, you might want to have the files more reusable for other projects. You might consider uploading files into blob store instead. -Here is an example: - - - -```python -# We will upload the local file to Azure and will use it for vector store creation. -_, asset_uri = project_client.upload_file("./product_info_1.md") - -# create a vector store with no file and wait for it to be processed -ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) -vector_store = project_client.agents.create_vector_store_and_poll(data_sources=[ds], name="sample_vector_store") -print(f"Created vector store, vector store ID: {vector_store.id}") - -# create a file search tool -file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) - -# notices that FileSearchTool as tool and tool_resources must be added or the assistant unable to search the file -agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant", - tools=file_search_tool.definitions, - tool_resources=file_search_tool.resources, -) -``` - - - -#### Create Agent with Code Interpreter with File in Blob Store - -Coming soon - #### Create Agent with Bing Grounding To enable your agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. @@ -464,32 +428,6 @@ message = project_client.agents.create_message( -#### Create Message with File Search Attachment with File in Blob Store - -Coming Soon - -#### Create Message with Code Interpreter Attachment with File in Blob Store -Alternatively you can upload a file to blob store, attach to the message as file attachment. Here is an example: - -Here is an example to pass `CodeInterpreterTool` as tool: - - - -```python -# We will upload the local file to Azure and will use it for vector store creation. -_, asset_uri = project_client.upload_file("./product_info_1.md") -ds = VectorStoreDataSource(asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET) - -# create a message with the attachment -attachment = MessageAttachment(data_sources=[ds], tools=code_interpreter.definitions) -message = project_client.agents.create_message( - thread_id=thread.id, role="user", content="What does the attachment say?", attachments=[attachment] -) -``` - - - - #### Create Run, Run_and_Process, or Stream To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. @@ -676,6 +614,24 @@ print("Deleted agent")
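Putting the steps above together, a minimal end-to-end flow might look like the sketch below. It mirrors the snippets in this README; the `PROJECT_CONNECTION_STRING` environment variable name and the `conn_str`, `assistant_id`, and `list_messages` names follow the preview samples and should be treated as assumptions, not part of this patch.

```python
import os

from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

# Assumed: the project connection string is exported in this environment variable.
project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

with project_client:
    # Create agent -> thread -> message, following the snippets above.
    agent = project_client.agents.create_agent(
        model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
    )
    thread = project_client.agents.create_thread()
    project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, agent!")

    # Run and poll until the run reaches a terminal state.
    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
    print(f"Run finished with status: {run.status}")

    # Retrieve the conversation, then tear down.
    messages = project_client.agents.list_messages(thread_id=thread.id)
    print(messages)
    project_client.agents.delete_agent(agent.id)
```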
As part of your Azure AI project, you can use its connection string and observe the full execution path through Azure Monitor. Typically you might want to start tracing before you create an agent. +##### Installation + +Make sure to install OpenTelemetry and the Azure SDK tracing plugin via + +```bash +pip install opentelemetry-sdk +pip install azure-core-tracing-opentelemetry +``` + +You will also need an exporter to send telemetry to your observability backend. You can print traces to the console or use a local viewer such as [Aspire Dashboard](https://learn.microsoft.com/dotnet/aspire/fundamentals/dashboard/standalone?tabs=bash). + +To connect to Aspire Dashboard or another OpenTelemetry compatible backend, install the OTLP exporter: + +```bash +pip install opentelemetry-exporter-otlp +``` + +##### Examples Here is a code snippet to be included above `create_agent`: diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json new file mode 100644 index 0000000000000000000000000000000000000000..f1dcbdd8a39e11c48cb03a3ee2c4bf638c163b35 GIT binary patch literal 318 [base85 binary data omitted] diff --git a/sdk/ai/azure-ai-projects/samples/agents/assistant-p8a1RfPaN1HMCT69X8YoEjlp_image_file.png b/sdk/ai/azure-ai-projects/samples/agents/assistant-p8a1RfPaN1HMCT69X8YoEjlp_image_file.png new file mode 100644 index 0000000000000000000000000000000000000000..4a848009f829eb3fb38a724516bd75d8b8346508 GIT binary patch literal 176468 [base85 binary data omitted]
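Tying the Installation section above to the `telemetry.enable` calls exercised by the tests earlier in this series, a minimal console-tracing setup might look like the following sketch; the environment variable name is an assumption, and the block is editorial rather than part of the patch.

```python
import os
import sys

from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

# Assumed env var name; any variable holding the project connection string works.
project_client = AIProjectClient.from_connection_string(
    credential=DefaultAzureCredential(),
    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
)

# Print traces and logs to the console; pass an OTLP endpoint URL string instead
# (as the telemetry tests above do) to export to a collector such as Aspire Dashboard.
project_client.telemetry.enable(destination=sys.stdout)

# ... create agents or run inference here; subsequent calls are traced ...
```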
z0�w%OLSs^u9tFw@xV{ua(CWe8t#1oXNgh6g!37VcSl(`v-7tTXM3wP}UV97!kk?{@5#K}n1Fs0T)e9V8Iy8uk~pp)a{x znG8c=5J3g&flNr3l9$e@T;C95G7r1kHzZ_uVkiLUYqmlNA{(UUxQaolO6NFrP4h$e ze5-YE9#Pc;S;)#|Q;j*L5J_!kWu|@_BEmA4G6f*x-Di#PTNC#a)yx7}63$>EBH_YY z!9$1Mj5wU(ffwL8kPY8(Qt8P|4k!>P2B)Wok<%j$m+zlFN2)}@SjfNvwWr0IUVg+G z8{a=~4$R15LVyaY_E3I6b06be2Tc|77lVf|z=g`^>XONFV!88Eu6aBlnQ*}0Hh;K` zG7h?<@u&f9oZOme?>%jX3I$L2;Z}#4 zTcK%|xBdD`7V&XgAKh9+HUSd~s$blOA+!+oZAV?8CLWZ-E^~H-9gUu=7v^=QQXGH8`@`-Mo|4L5@i$sz1 zZNN7D)R!c!$!Y_*)5d%luZhdZ*=*1oh<0B%*Sp{F`gLCymUN?PM`L!znd}ds9jI;T z+5!geAIb=Mo*1+%1+HY=v&UE)@<R>mc$~ey(2$3?2f@Ss9jBB31d`?=C z3vsXB(rihF3XW7F;=(0SZtz>Bh^Y8>Lm0Na;H}Ht_q*H|*H#ez#R^{Zo2* zn*hW%f%ls%DX$+46PAb)&OMNWmgV$}J|&;47jjC?XQ#YR5|dw`YpWi<^tb_lVH&!& zU;h_7z@0FcK{n6dYAmv8z1F|{E`|2l2o20IdlUqcI1xI_t;3kMgAL|nyjvH=(B<!UA8xYfxMlaqFP4Att2*rFWfr%Z_FyYJ-H z+lAl1Z1IfS`oITKbhIoI`AgT{?pfxOLBSQ(kqX=|Z{{n}YlaLGu5hM_bSUkyBc+NFPbqKZ`lMm4t z>|-awr0dLZEp*ciFbMGURp2% zWX^Jcm9B*GEaI9FCMcOtvaS0CEE}NP8oCye5jkHUAElQMwpg@hslsNkU^)5J$HyL& zBo5oa6xHnQ)G<_eu5WJ>IPL=bn?Yrs$C#j*HksBMc;R_a2_X)Lk<4Z=7A+(kY7u|P zu<4d;FG&Nh?)TTWObwo>&`-@-5>>fKvSN_2<53ygnuyRt23>$*NZh35v@5O&k+tl4 zD{ON9Aqb-6c_YyTgoiL3Ns%Z3(+u{I5+LlHVbaE$HMZ-5RYJ&kZq#9hph)`?R zsQAj<6B0jzSTaBWiL#+z$-Tw=CBUfCtPrPDU9EP;T#ZouGHrXLsr%)n4In;|`-fMJ z2IM8#QDTszLZtvswL3z{X)E$5!b(j*y+Z81(o&qbJlIp*asCtI@V5b?K90U|R8sOm zjx+_(EEa>DgKe1!z)=Xtri6qXK)seH>0C6+SR@)f_j{0JEQxpOgzZ)F1)Yq8e5D0{ z!h(jmR>JUhxrRB!(erOj0ej-J?D+08>pGN_J%|8a`qKWGs3-_v?(AvKinU}I%O?*5 zUqo^vaVSec-E*YSqg39&EE6(xgv}$O{Rs8oB|3fUGBcudBuPZ(s-Qfjx;mPNoicd8 zYfj`mj9NF~D8!3BeSLk8u+84M&!l_v9LT8}DdLFFAi`4Pc+ezM%K?*F#N8sv*_SV0 zP65RsS0g0LqEQp^L@d6)-Rb48_Ju zI}C#l;LtH3W9&zcy!-4NS2fe~vCoU{iXxe7KAU?unz+_jj1&@erOjrUx9Ts9e}#Pu zX{jHuTH>)t9smvTtMuyb1;J&!fU2a9TQxJ*l|npErBP;o_(=CU1>G@z#yEG4vk29; zJzAyARUBPd3*!a5xphyl%?fTHfjCpsK~}vfstSoKqMlFVbGA2)w4`3Z-Ti}%52_`_ zBTVEe1bHxV^UkCFh-Prqe#1n_)Tdvz4?@vMO_Gt9=bB=uMmyB?-*%F1_HGXZziB|W zwkD{n#;}v_$+xOznyQlfz)*i1aV1})xPV7kkD$>~#EzuV8f$B7y&gaIS6LjhD{^0& zLOr2Ib^4GrB8ermcRhv}01dlofYyPeOvIYr&ytT1m^9PLNK4n<-LQpZLp_b`kcHEk zP_c9-BZ@G=TW0jam?kk+wK}1&{}h)5#z_6S^(>qFAs>?wZ7wkQuRo+hpD;uJCt;_+ z1|@%l?5AMu86$SYQ2h&nI^kG#F}OH-f5re+p$~W;rnP69#bCfg{pX*5?$OD$!>wqB zm1Dto!_aWx0e4{y*n*B_>51xSo6DDff4glfC^j{5-&Lg&5hrdobVDSJU%u;$G2}#= zd9+K`XCz+;Bh|3AB_3ZUiC@%9jbR%yGXTG004p;)*hU|!SC-_B&T5Wh$1Y*3Muq9q zz^g%@8^~-MieGC}I0h4dC7MtrSbM@ek#F}5-c0$1J7!9P^}#z4=7KGkeg4ur;e}hh z17r#AIJw~v9o!@e?(lm=jnWE5_}CHHGZ)tDzGS8#c3Wx3kI>16<9?2#!vE645P zM)qjDRUD=Y9fyc%Nb)+@j+7&9KQz9QRcAXR8vz>X_pvaPNUky-heLDWqCHr0lfpE! zzP`S9WeQPSp$rhmT$OqB6Z#b1To;q3iZ7~_(zKE=jKP&LN7no!W??lXBu_RWU>s94 z4C{-QhY*y%aa)cLdke3BS@N1*b3H761adV3N4h8^-)+)yv-=T)a;|?i442))cTbJw z&qF%C$NvJ2bqWHW+1C%=Tr9#!pP<|;Be6m+YKfJN?Aeum8#u5d(aI-eadCJp+IV3l zw_M{@3xkWkt8I~ebdduVAi-S0oDsYta|9`@l_Z1SXW@A`peB$g& zkYb7d*$5ZswA(?Y^c$5^L|+GjYD9In|Mo?~(*pi?12UtC=d^u3^gLmeGHYdACIlM= zClL{fT?^0q(HRC1My)5J0wkRl(ZdlU6d0}W+QOg)lj%GT+>QZ9tPOS40bc!gzss0@ z`2PKSKk>c^fIt%a9B>F3F(TvE85tQ$U4dFV=H};3k)tH$qQ$;=a7qr)7GNEWhp9f; z%}LH0Aj~~$dpinJdb2R@Y&Zs+O*Fp?3xQTuB# zkx#Om#J|Kj1E)zY`$KP7iM}Etez=@b)X}^Io#?a^k3h!y5fFN{lnDK9wRs{cdOV=p zuaswUf(oUDl(d$m6KFp-W*1xq<1Y7&MxAn*GR|IZCu52!CCQGrX<{B;%Oa3K;4TL8rFD2^c3P_@Hs*L4_n|2uu&RD&xD@4rn-6 zeuti-03bxhb>BLhxkD>i)zvW>9T4p_24Jy_aDgeRI#HP&#y_1S@B(J>iCX7Bj)ilq zy=}5P;TR=z4c+qtI+)xc<`yWAg_Gf?_3LAC2}qEK_?Uy#*OMG)96HfsNBryS>oYL{ zMf`Y#sG(Ak0nZ2=HQ+n9>oRU!8=O_lR$DlEY}~Xd0r-k!c@gTf8}cthJ%RN~X0^$DKvvv#4md_++zhpdoHD`wRmC$3*4K-FIjsYl8C+!o z<)E<~KYe<_=aMqrsF2g_5gb`S4*vl7CoOvj&>-*Tx0h?zu2mtYV_=F4p@%5)ZF&U? 
zQ2vSg4rx^p9}Ti9Qq5)y`&)8s3;=yz$v!p%^!Zf02J#}Y%*cE;`nw8>*F8_q1O%lR zUw*5~Jw)frObkJlf0XbQ-}dT9RMAoSBCZ_~ux_2)L`QxkPf4A?f25N>@m^&XvVV@C z68U);0VYC6hY=0`?|i634M;Iv&9;O-WvKe&>NC#9tsWtBn?&!ihBW{;8sMyWb<6zVm?#S5>$2mC4M>B!M$vFv}EG(lbYgb>Z_sau_v|0F9RPcfuOF zaQY7U(Rs&7&VRXr287dIrhhDWkHZ1*x`c;B0VM<|8?HW&j@1ZWwpzUe9>t+vIF`eG z=Tq67uZmd*La50UH<^k-7L))_GCzinPRttY1P7=kgBW&bB!vvk+f_nfDnuoHH{TV>8 zloBY)R`s=35*x4mIQNUx*X>8W#h+bVzWVoroGr>f{draK693~vdrGS9*U;PWGW+{?iBkj|UxpUI@>Xf!DmJfB3(AH}X^p(xYeZdHeg{psrhg^3K1%4A`^m z4^dNaR?#!Ab-EA#+h_i@nErQ1FzNrf;ENZKKGMZ8AeeWr2Y`_c`1^1uksl5h|Mebp zt4BMCb9i1~tXGsBU-`H}e`QKl@dpoZF{T~10FY=9t zLPz&?Gd=xj%G&$`dw>1EtZJ;kPh9`@=Kl4T#lNfq>3=?4J>`l~62b)(TG@a#+>-x( z&tPXWNw6#<9o@wL`?h(|Xqp+swYHNSaDz=x4lKYuypabT z{?DIJ=iqj!oaF0$BtTiaRaoG9{=lC#oR02O+Iya2I+Z=NC9^oH2WP!u@dGp!45l>M z?;ThAKkLNm6~&OO%AU_+ujywu^HVnFYoJGANsucb@h>IiKkCcq|LRCF_h?v_BGfb= zxd1wDrhWTN^VZ)y^=~T2>Xzv@N9E%36$Vq2s@6~euq_VVj z&)RKvKu5cF?li!%X8Sj9g93Hsno9QfVAL@#2r>W78$JAA9DC*!KCqVVCk2&W&3e}2 za

tJ`ew;ME*;~!AQq#`osLk8cF5F28))t&2;!b7u0_~+vytp#lOGv>f&R6_~Ons zKHgU^NL>YIU}R>t_;0>7!4Uua`5$l2|5TG#&tLnH&U?6ud{~U({x=`?#TvD5WD&wf{cjfGpZoCTA!i6zQ$;IF)ln7*J5M(#D-aW6;+u_F z@O5W_VzI^X3pn6K6H^WMEbSoqn0$G6AE?JmWj(jta3W4PdZ@;$kTDG}8X@akjfn1#k@2pq z9=PEo27$4bwgT5|?4Ki22#z%&NjlZ2uJ^GzrJrRE89qLoTViO~!~fIWUw$Fy8A0lm zNli^9v)hn0kU&i$d;>>~siSnsOs_2&VEQ9CDtDlC8z76{tW!E9QS@Jd{{k5PTHj>J zfFU-f8!2G0M3vy^dJA%Z4=9HjTyCF8;L>>D{6WY<-U}F_tkt$EP(A8k+4Aaj^4#>h zoeKSk|EPdAS-BLL40>!HG*@Z-`?%a>Q#rF!-r$(8WT0^vnr7e z;=A?TI`oXqd2+M>Ii!SBdS|*a`fab2(lk!w!RRwD2L-ojPjwpYhyz=b_95|i z$Sb?@ES7I{;tFLT78=tI#7_b*Sv^D^0X>w@hC-V=n%j4{v2v#=~$B|XRifSA1EO8X5uZ1ao*s3nxV zex;GWU$5uU#unByIGJo@2>3Z~B!>vMBSSF07^!Z!!4`T}FoUp>8QRS|a%yKB2wK~- zN8_s)dI6G1svpBK zFeoDj>y#Blqrf?t@}3}w+!D?1tFzyEH|A$h9LV7qn1NT_u>b0&?FTd5C=I?|bEs~` z$!pa1L8C)%@w#_=LyBIJ%EI60dv@O6sIh~j8P}}axGT4kOmLEwh%KHQX@H|M2BDkR zc`Xdx;M^^=)j=@pA^3MxWO|qPoAKX|hZY9KJ>hXD6=-VkV z`UNi7jgIQc;8_gq!)4NH_Ks;s;`GkO#%nJ!XWcIoNoWaCk*cdX+b7o)wbw>E?M7T) ziNey_HzAf!1zwgn&Nh@3MIRZ+=PNhpR5o#!hsY1fz?d`61`6(W{saXE6jZh zW1n#?E7t>$7VQ=7`4$=eG^Q|yoW>HTRO0y5_gMb1kCX25Ve?^x-y=qh?{u@mb>iLX z^U$?sNeALyGy3w1yg%)`c%MRWfA4I+b$h-wT1Q4@r4SO(fVoJdy?08 z4k9?_^L1*B`%34;M8L|%oyC-@aPhGz!PMKrA)>BoxI$UmKi(GZq5OnO{WFG}F^@}% zyywL$DJRsxxAK1XBJ-kUl#U!{-}`St}gmpsUpRq#L4YGt{`TS2Rdl$r$lz&$>0-ateOJt_5A9 z_blNVK9R&af`pT~u*|5g^8!woR2@de6L3V@ZfZ4JnF4YN-bYB|fU|Cqn)0z8L0%lT z5eG33!F&ntc9-^DzprBFyQiEFr{^zeUhVtsp#bG>a~YN6UT*BQwf}}uA1?O#$H9Zi zHLwnA%UbtNhB`Yp2xBe|^^=_)T`pf`5&Xh2lt_+c%`z zBFv)*gCSf`-ToF8o}FDc{Ns8EcTiTcdPvfALJT&Kh2F6vN6y?Tf~K}QLOS>T+m~=Z zawa3}kT(BdUXw4cy2#j45Uciq8)UjDfVQd52|((0d-{)?dT zm-H~c^_7fo(OwLh=gM?;L9DjWQ;%2qIKWTR!zlPiq=+&N0oSX8oXusShxFfId_uBq zUbP7R4_j6hT&$6K zdJ9aVnlH>_>2WxQquHdn(GVituyLc1d3}@&Q+qS<(j_G&y%(UC#b6kQwa0OI^2x7% z6Y-Px(H$>ezFY&v%A!0#h@PFDU78#z%IP%N@NvKq%2t|N0m5a}<}>M6U$1R%CMP1` zeH{iP;kiipV6Ub?D$32T#>!?C(%N?XK7V0j!~5(YLIXf{cQG*;{kJq=&i6X~TkN0$ zFv$SfnMp4>n5z?+gC2jh>?vqYtsIU3`{@3;IdambrM$0+p z^Lc;X@B4mjHyhfbFU=A5k5dEQPtBAQN!-!ihApQ66-^jW$J@rP^E87wnM9KtZIEGN zoGJP>NaQN{ELx;NajNT*0PBJD5pJq>iYKrUg{)O;*BVh@0EIf@2PsRAwsEkPD0Q$0 zweNNeUPe}RSin8%8?eXWN+b9uNHCiX)&$`H-ih_CIIn96@xPIQ zTU>of|4J5+*;y>&;VE(heG>K=1r#m$s56a=AQI4oT4}bK7n#PO8E*!MD=aK5M|rPM zJphfR_{M686~5N$zQy{^aHvN;>V2+br;Lm|h+}TIHf7r3wpniQJxo>s*qwKQ^g>YD zT+1#pdB9T1+i8b~;UxllC?jEBUi;k#~w_&5-h@7NT@hoM|3@cf6_7|K2P0G z_HL*zswratts$O`D7b!(*FldGgeGP-I)1;cJ$oOc_~;N-Y1E>T@PP+I@?wCOrnp;* z!hnl~6*n>hZ41oq8aTZ-L$?C_y?jMWLFBfTFdTOg{B_Hsb_+R&P;}#|^}q`nw?ld9 zXcAvNgMe4cwT;sPeTq-SF}5P)3+jV?NIJ%rEF>zbgf=n<0V+P&QQWFZU&FVOgNsQN z93Dkw%5ngxPr<9y%c9XFB1cw9xTh3$xUCv$)%b$PgiIwki|`TG;~83{DSuS4yXVU( zNp@w<7HSNAE&D{01L!1d0lmhY4J>xA;QKXwz#l**6t*3(DH~B?`etxnIORb??s~qQ z1v%JC{gf zRG|Lp4sLz)poFpTt+6*8MD0~$l`m%ed6Tj3M&VKy%?y45XY^kfcyE>|3G>EA3#32ML=4Cul)Ag zwtn6$XCX3;Tn?&|Ln<6a7#sTnV$9P`ZWxCFRE=$Ao;B#i5$RO0}-$qw_IM+~DM zQw6mPG=tFJ|1S|u;HEuauQqiT!NU%aPDL1P-?nYr;Gw!)Z6@SAx$z;8zhjBv5;+fB}BX5Vm2Py#3{vV5;5C0!o3pC%0X{Ul=;D7B@ z*KY1|xDyJ>t-q3*lulp2z7jOa4hqd9bcTsO!N=Hy=M_b)UmqJHhj`mm@`h8pLmSuM z3;o}(_f41dIXf=mn*P@&1{8uG4WI)3p+&PtbBmg<#M`kUi*ui)9K-USaGJ}E_Vua% zpQo^N(-E23XT<5vqEhMa&2s37$L@O69sxz@?-CE5?!fB<+$C>rQ~5S|_yjrUWq(^O zhoxB$oQAW|;n@Z|+W$iDSuFpT9D5Ix&qBWi2=ErbKJJ*$EMLdXrrjTv(d zjUYS&ENJEw(SOO_>O#R;MQJ10)CqNj%MT32M#`2UArWA8-}_=+!2Za1atnt3PUvSy z?SQJ7Bs+*A2m*pC_);H*?oUt?jsN6rWfP}vRN@qfDUs$Y=@9vP9p)K{rtnho!H;-} z+1B$*Af%!QFq)=v)T9iOVHkYn(Hoe?y+kdoKqlXvJ9l0!Du(KawCO1D-(*V{gsS<_ z4+2}CyYDi{fnDem;?W&yhKtEe);S6h3Aeu4yNkM>^FYlv1I{OzAqiigp{Swu0zEHa zS!WrH3B^tDIbvqL`e{* z!(bqOnS)^N-RWfUKnH^XKO`vvyts}{nl@5=L9WmC($13k2)_ZT@(^&zW)2200f-aA 
z9am7+QM+L4Fz@85z0VXs`xq+c~ULPg!Cbkc9xYW1?rfZy@kqzK?<% zm%R#h6%w}sajF5{zAyeM454t;9heItE1OVvl3szkwTSPe_x>~j&kqT2$>CMk4pfci zm=IXZ%0?{vKB(1+{yLf;N6mgk!Pt$%fsQ$ zFF$5Q-L6&EHLie|KN}792xB{zBD`Kruq=ka2_9_$xubb}9pLvS=*0_N0dMUoKv*o0 zRDu!6hr@yYFq;5vEA-0Glj`xg5#^$Mo3c!sU(;v=&Tu8?bLSMQqya01L%PLMq-=NS z&GA#rD1)pOkgHvn>N3K=ZpG}O^HE&jDR%6?NsxFTZ?*xrRI=>w_!=RgZobcKlkje$ z^-rSs2zzD=v}iNfFlQ#vL<&8hz0a%lvK5j7p8=-yG;u=^7M_?QS0o)QU_H{KZP3P9Vz8ZBz_Wf0GKfp zNh(#);jw+8xUYdB-|YDp7_6crILw5I#h2<|Fo=k9Q*H;1M=Y1_5rk_kVg~Y zh&{^1xhNDT+GI0eV8yy_ofxk9fthr*ajXEjQ-?EHWZz{h(!wN5i*@>U>OMBCWP=1h zRs7PWbA0jII~ICT8&y1Ds|1CtdA=wV6A!j&0$F61FMf$V%roD0|IEKg#!@D*c05WA zL|THbivQHYZM;z|^dB_zv_CW&18C zw2VVbk2;e-$!o{cTaM=pQE`WL221Q7d-+0e;FIDt`un@KLJea1e5cBm8`o}L5M-fu zRxWwd5V-+rh6feCvg~6+B&X`divB6j%Acbg^BJrRK+y{SsUlf6@h@azuG&0wf4!g~ zNK_^O#N3}R8t<0|u#*B3lH&jpe!SS?JhdTcdvZWRM_z<>DcG6cH?~Pw)!2}Rk{E0V zNNt|ju$MIuP4XCFGPv+ymKra%ygey1bEEA>tITBijT>9I|BWtz^mE{!Q@LafZlE7% z5b!6DbzfeX`Nd;dvsC%W6OX)l370e*5xqlT&|s{A5_3L_oo7gS5(H<`X@OdFGG<=j zcnTov1@UJGY`;vk1GOxd`ebRVW%Q`oExyZSR9Y_EFV(;LBw5?uKDFYnU7vWP=PkdpKcbdSMWth1gw6WKq{Pbwz<~vm3y!(qTE`F zd=z96j){{_>s`|3&71Qu_NC|}j>Fyi-^gZTedi%xvgjk8ea*?&^N=E5)EH05=q^M* zSeEXAq_%EUCgHl6|AL*2P)c&|GARs#9L=X~H(dGJ=u)l*2*y*z`9=QALpe>}na5Ku zxhzRbQ}1qhwS*hOoo^)M4BE@rMI%byd8~D0TXp;yM)iEr?Rv6 zVf!jF`!ajHXP$fV5EUbf=#4#ZkTKT4LGxP2Q_56$VcUSE<$88wz07qiKxhz7my~kY zizOI?QoXqK@&@8AvMAaO(L^emAEjaNB&B#j#86q`#WR(!O9W9DF=F`eZL5UO{PAPq z&{ZDGlO(v-p3vbp5*1qLsc*3rPPJ)(wxQXGnY(n# zVARoI<*|Nenc2hN_zW2xZ zg`ZoH1rk*SN1lgC0n8KR8Dl$RJOl-`JhRQXyksLT1e-}}2Lx!J&8dU1ycwCf&s|Gx z8l;MbMI0vI7xWkz(aRv^UU7Dm^D1nfAhkn2k33{H<|n%NbbVx1pd5&y*dR|}&(G@c zXazu_V2FT>XbY5LG)hMx8s@8LinJ{HnEa(#gyIlPE~8|o(QM=@GcF;w(~dBa#3puK zEh<1Q`iyWQ)J3adq9TTgMQt~F(nta0S9VzB8`nN(wl+AV?FZc}{Rp95ty#UqudLNGo#T!eO!~LXCL-<8_FW z3V@xm(9c@?!0W;A+mVqi?a31*9N_n=7P6=p0DnA7Ci#thG17vbVG5-`wO+U%$l}X` zra&Ot=*0~`vLoVXcQYK^vvF`1`m&Dq_o(<^y!*K~$=FdYN}9g;8!QCk2oi9bs}Z}WE%UYot#8on-VCVC?X@h8Dby0k@)2- zhy-YmAhu>9IOEWV!Kv&?`XS^XC$8%HmF9k7TMWQjqp>Swh@de(Fslh;xGAb0lrtrQ z_V!(4>bNvSihzLw-(EbUwb=IZBe&g4)9bHEROpcWs|R>7Kqjj;frBp!mhUC z?#F3U2-|%a9=d~rxEwp?-ZvQ8t*Ml}JY?dNH*v>$#g_jNo)z32CT8cO`f=-_U- zx0?HJ`t)Y|XW%(V=g)~YdnCO<@kf)EJz-(mE&!>1utcD0f%Dr2|4sEP@#gxI`OuUTVp>`m=gNEP1^j3@i#1rhofgn(|k~IoR7+1kV2ID{L=l#}m-572QVnkE$ zf=vL8YOz~nBSKWRsgNcqpx_FjEP_Xm9+?G?=KjW&YsHgwK)h4XF`4zS;TBnkS412mXrlxEQDBLG^-FL>UECi zzR$grZmP;jGDC)uC7%u}nnS5*QijO_ftt51o1%4#G^DXBVUQ>nXif3^FuPU& zr;{oO{cP;S*!^P{Ldr=27jV#vlktv5v{Dq@7@5t#EPufFU$|1ewqGJqXRIZq1jZ_- zo5lPJ9Fp=9Xhm=U|3edBQR|c_a*2AzJZ%#rhjPb{L0hAO!*AD19&do0iL%7-@zIOq z!KfMti5m-XE`HdH1zF^J!(m;bypXZP=)VwGrs@OV!NG^W3x8tV!GrvxZsV%z?W~aQ zsyx5;_bfv1Fn6E?Lo!IuVtgB65wRgV7$*6J5$;>tvlvGt5Xk`edP(2A^y#9>Ixs1+ znG~>4-Lk}_*r44S7G29yl0QtJK7B{jR^X}iABB7TBbXs26(g2HdL=QA1`8!t3&ozffQo6v8C<%Q zz6ax_Kv4@>RtuAoULTv{k@N*oaBN@H9BzBAGR~ZQy6x|oPk4t9U(h%CMwf;>gM>t( zhUV0saOcx>L7G)TQU3J)5tR{;*@bO{hSShLpr+mL@=MB7^gUF0%SdsWSP}QnwO^kmQo&pt-+!tLJw`%b)dQs;Sw0v2le^Dc;tK*Hq#?^Ut0Ak z2$y0>C_$Wj)v%*K0~+Uv>T0w+;To5QOnh|*j5Kz&-SiS*q@e)m_a!z@s|Z3l zsb$}3;TD4%?+^8Lgjr~>OXte{ux}h_4jAfrfa~U4T3m6BrcV6sXS`KUv4& z_^F>zlDx?u@*I$g46PA5+4Br7DzEmgeL<}KrC9Emgl>uq=@gMl=V6wI{X zHIK%So@%pe#>kYCK1fo$D@-Cylk)cRqJxg+scIhQ%Elcgd@wCIgqf2Rf`RTTtAmTL zFOS34^rTc~zu`^c@YuVW$9oP|hQkCi^?{*4wAsG66|uGFdTtft{R$JCA*yihHzTnH z5wOPfl1zv6^*`l&dSLu#3JqLCV_$>Em^#_pzm9mda15|WB32X`Fn+Z}lY99Y%P!)Zs8se^om$EL4ewjHpY7OKODtw_|M9?r zsX5%X5a#a3b(xHT4tM7~J+X+mddT$u7gx^?RpaOsx=ojT=oI9R8aPngA6ioi1}7~b zJ{%RZ{^A2P$3Q`yHI>x zbWsy5P9zbnpToz!dTxJA-T(U>>~>Up-~-|J>fE*AeWI`d&?v^WAEVV0t)r+6hx&8B z>xiCTbx1#_K1GL8bAJ7OnSpu|1v@~{NMjo)`;sK@h$0e#+zL4z*%;`!z33BaSQ=)( 
zG@X0HQuH1fkBkH=v3m7tAl$RSpwN#4Dxm>HfdH>9i}FEqfQZh91~&*Nl?EsloJG;L zJ@16UIEsgLH(1{22*RV_4M_JZAT6eKS7(cHI+B=XHx_b#`jy0-{Eks?gw~?jE+7XC z0th;R<_g-`!USajNJJB%hOiT?lu#PBpXj8YlJLYhGL`~+$703_tP)&MnOKiDL{F`^ zA6Rw|(g}tbdm`x3P?v z1Q)@+<%`RrVXX<_G7yTnA$dY9O3O+QTf@E58ej0dP7&Yh%_$OL=OGj_7*WX+Bn$Kr z7DXX+L=zLZh#(O_e4-cug36-jFJO(K0;qsP5`{wzkU{EW;mReVIkC0kGO_Y`7(feh zgGIBBkzoKn(0=yPrA3QBh9GmH4MR>4-xkg0o>Y4$nHepi&>eSVJzhZ=9h9hWYGCNN z9CeOpKHo(@H9E*rgm{5qClIyyb1i(tcY!~}pq3Fl{Xp^BWRPIQnj`6U z{qZiDPxVWeg|vO?`ds_>`ZdD_t(&GZ=+1J!d!y5%=kbSmkGnNXspfZ`s6f7%QBfx2 zOy7t3%+gU4TD>|~a`~kJV|f1HJvM*rv~%-sTxe`RBHSYK$Jhk(m&}$%Q%$V`Dd|e{ zm#GOIO`;7R=6FrO=Oi0OO2#udBEm~?#v>gm%k=4RrJ3n-Bj@q(P<0nSXwdwRL*?F% za(y&)&UqQt>GqM>h|qcoJE_O#ygd(mkb<8{3V#cnq--jhB3a^I2vNhSdn>;d1a>0H zMJPl@Y;q}>7nl`-3Wul|FHWKOqbk(<$e_3M)1|$JxPSD)!C4*zr@>nm68Z;lIzn|Z z4{no>#94AiqS7E)1@bYZuS(Q}?97>|>c-F=x<(%F`|BF25xp=(brY)2(1(R6}xu7Qz}3hZ3)R5&Fz!YvdYov)k42;m<&pNktgXg};Yv=wL3 z$Am0pPgv=c3pC6f>F@vo>DC$^8>>p8TPsIN`NGzKOSB7dni$z^=V-F%GoFWbzlU@L z!AI1RI8}0AAUy=s8nwK@OfesIlepE`qY7h9eSU{pg_1P0B}VY_?nB5-ySsLBTV6yH zWIou0ERz-hr6|Ay7eQG_xNg}SmX!E{!eHy)ydyGZ$|_kl`_!+KRSqwAadn+hT{U$H zL+j6;g@cd|!JZYBFlL8l%41*X1xSTMk`|;h`e?vYN&#=jWjg*sN%P4z8bbrHDLqP8 zYz6=x=>4jyk<$x6T;s9}cm$B|^jryKiFY>TY;?(xfVc!HML@QJ+=6F!rk?@ZLsP+U z+OXKv0id(U8MSuMI{m?GYeHYGt6exc1x{hGCMKFg1_1WszDMq=f5iu~4Lkc{QUPkw(7o%S#z10bDsN1XNd}gqkeFChVSLY{z({yiFm#C&-lX13z$+y# zb}e$wR|*uX_3Z#|TSb>55UfqB1-)hA2Pk)%6JL^PM^O=a{q}{rnECL=2KJugh7HMu z0N2R5jXf}_4wR#jl1$H*z1;Yt(ZF{U&zCb!s3cJG({1fHJ9ar>3wYcTj}IE1{m(3IbL8btMe?ZTF^r z@kUIQ92QZanG?3!&ss5{_13gIo<7|umLj`H`wb92Y$bBLfkvz;VkoobW?ZK&5M^}V zTc920nU{;$OI&TJxdwv#vx&__0CcD4#aB&u>V`Qp1m9su@gZb*RE{CGiNMRpm@&8lzD_VRq>k(#f(nH*(E-wNj6+*WFF5=}5` z?UX&>@cHtg#G1x}7Jaj|wRPMA9Fs{87GvSvouLIKMuU|Ba;7hLNhi-nsTgTKv>TR|20kM@zj{Z z4o3lB1_cJ5NWTB~$unns>?h*Hy?RZ1G>;Z*!vWvcRA~a?T%Yzz_^wPm6pty@xxH`rNu3 zXgHa;qs*}}g|z_&0+rX6c%V7NZi z+Tp~3xm>2S9cQH5l(E14wyEL;rFw;B75k+s?=$j41J$b4P!y9XV$N+>Ro~b0=^*N% zrE;GOao984&pW+53lbxuS?WX>HaJQLmzOAa-ui-O@g9t5X(-ixeGw1c2B_Q4G3%ah zx~cxaMMHVZjn3;dvquD_@!yr9kBujOyLk`6bdh^+#hqjV=A@ePlc;Y?%ckjwJSUwZ zh++{I2#nk_lNRl3s}3kAlr}drI~XB+JB;AZgCip$4OLmHzjf=@OCeD>Wt3if2pTR7 zU_v_=o+iP4ojIR7C+UROw%N#z#p6lBpCpJQSZnu4eMW)gW;4Sr*~LU!+4A%Q91HX) zuzjIlRbHxJE!>EXfDQ}Z$~o6D@(KHkiq+{63LL`KC-U!rKHG@S_SKd2tE`A=YMAuSacB?XDKVIJv*FT0zbBU5qj2WwX|)67 zXN1=xWZE?8p(507S_eO@28vSkcV!23O=0?1tk&Slh3|U;FB~S-=HrzybMNHbm z7pMs+iiX9BN~o0SQls&vfKhYSqMgtn zP}C_77(szVv)y<|T{4FApS^Q+!DRk4!Do>dK!~FvMv3M9uO_1VNQ6e{b4PL2>|j214O0(M{poCU>ZFCY@9kz7SoIXgmk6~ zwd5hcn(#`}urgUR7l7uW6V63GX-bIjznYIbF9jf4Ib#wuiuRqg+O*LV3P4h0F!w4b zE3q0vNLv9ViqNcC=a`VZuR;*I`{5%ef9voG8oM1!MZ?4-++z4JogZ&yRT|Fh^XE5J z*rGy1NoAN|yLx0Kw#jUm!nahn;LxOdZ1GgAAlJYiDahw)6$EbKE{=$2j1WEKwMSv7 zX&9c5T9|0*}`2`l`;!YX`&6T$p1Ma^$e5Ph7Pt+`F>6iUf zbIF6JKNVrj6p9j0#ZScoo2nf%I;D>#8^se18-=}mFIB@IgQt+;eLpZVdNN;re*72fi;F~qtU?KFK+0W@vjZ9O*8f2O>P$ya-XogbAgP^Q~F?l^ZNZ@GX#tk zJs<67oGTx{73Jpy2;2JK5uaQ#Us1~Az!bA^n#U5ot0TB%iOQIn+av$_;fH<@F)|X< zSUsNYXG~)qS&2lwoR5|hsaf>!n8P9uSNNbqCw^G(%AT{}9#;$p8BStm_cdFd_^n~H znHMsdp08ZtJL8u=#cqreKOC!u9vaDmpCfsBc@1xcm5IAc*;vk1;ZigX@oA=-1U2SLvI*yA2mr=UvL9;rW7JUwOG_MKzxe zjXJ>dp>6ftoV!GqJA4S``Gj+e@=I`^*!v211yloyq?}Mtg5&sin=M(xn0fxQ_b`^C zp{bLp;4mc)fQUtzBRTrwj~C^Ed}vUd#`1R*RtD#ghfO)viD_3R6hHL zHVMNoj-N1L|GwkJpL!J=QIBq#C}I9zd3-nxQjf6gF@Q207;n`EzT0Tuc7cgzk_Dyr zN5dFK&AB--%hgN{Sxg$HgE6bf)`^)nkOzz&_YHD!sQ0JJ;=^n6GH6Bs`Q`y-!>q6g z293l*G5LjHH2oxYij&uhLrQb+oxv2$ z8irBKB62-Th_PI1!n^Ve0wsVjgaEv6D=PiszxH>lRdj`rUO)R*2yBJ2e%gq%}Z;M@As@B&F5 zh-}k7a64SxXT~Il`07v{6e-GP$_r=eL@xjF^L2BG>ZVQSiHkw7wF=u;cdAsyYoABl 
z52J9_DxYx~tU8S{D|OK(27uCJaBzg+O7i~tscE|?_os6=U$OAS=3Zt)Gd60$!hJ9- z;C^@5w7<0Ksz7@=mx)UPe%aNlRrAh2U{`tvI$oPZIflh5tYVy!dvW093r=(R7E0t^ z30iz){Df+qaEU1V+LJejf5B$;3O!_)+;YZ`QKUC7=8kS}uANz65d{;`zZcnVxS}W; zj&zn#5gdq8XIMN!S2^;RlhIgQb70CqH=D0FZF!$>%JShP{i+E2&Jc})Jft3kqeZZZ zGQx*CsPOljBhy3Emow6#ZbS!&g{$!6LSPxj+Gf))&-6N|h-W$eEnBWBCb@G1Ofg}< z3P`@yo5a1UfvJZR3pW9>N=w+UsS=U=B_51&bD4X0@&DAzkGl(c|Bh=`5g*Q<`XN!I zv(Q2_B8~f3mP=Kt4P1XMi+eOU6yhItqg>Of<6SP$jE0&nEhJc>eP3GaU>aU+dmVCxp)c|fU2`-l4`l(?m;>qo?01h*9n9+Pfy9UY zEtvn!R!qOq^QC~|t@qIQ=Mg6xndgL^HH3n{!P@|ZRfOr@F@_onqw5QM{sbde@5+)C z(pF2gzNMizmoy5bu1b7cUM#$_IE6w?9H+`?q4=6v++oI_nyJhquJ$Bz<4r=r{`dn; zd=C-u-5b6lmV*0B)C`_lmSo{0oZ-2MYaz;!422B-W|wiV0@ZenK&W&7%N$q zXTp8dM(f}a241cJ+flmqC+@#{f5LAqEqC6`WPwO}Z|KIo^uw0p(#*U4Zed(_2vjV= zv9ZrXO1MwSv=sc_ZG2oB+YH#1n%pLwfjwWt<~!BKhpQ}YNB2#cljY^*)hB0Od3NHr zKNEiHgnK`+(&3O}BO$aYUD!}a{vpVMNV5Qx#C~!OS_9i}zFpcWkLbotz@XlW%R5$( zHP6_!SBxY1~qqSA66@!|J}u3pDhJZ(cdzobU{|nbE`^wv<6sZ=X# zDckmTDxXi|O<{7q_|*pP8)euJzrnQ+LE8)S9`}2JDjn7%?ze46^-KB`G7wWdCy;6q zu_5K33~M16az!0<9(09eS8R|O1XY16APax2Cpk7ArZ80fWFp%FCxep>s*U}Is4LnK zNP5e>QXaFvgdwrj_b@$1l$PAQxVN!h$(HEHNs&e~IEg9ki?uA^RTBKE-B~=mHa>r~ z#~dOBAwbgx4x;J!5ZuRFtbYvn(umT9l&=}s=Sob-_7`ebsTX%LiG4jps_EHEi0Xni zjqJi;Tbd{}Yw6P4*$b$8B3&drP&=lB&`|i z?vrG=*!Hl+@cEWHZ_zw37!Il8Lx0)r(M4O!m&yX+6xqp>wa8BoI9qF^FxbSM6PK2Vvef;bv= zsEG-8lfh5YSln@UGvj-u_my_a1y* zydR&oKAf_r>U!$$n}G`M%prN=m#hAxv>5X?^D#adP(hLEg5)nzqWWA(+*Rm?-D}VdB!R`b2@oZ?tq7Ho6P61x!?%LT|L-MD7c|R0M%r9DA zkod|V>l$z6lk{|9bFl)dIsvCXF6a=t)aZZNB%3A;w!fnpa4;+RMM*b4S;T?2AfS=M zV<40ROFKX(w;*#^#{nsO33-}>K1lLXL&PE+_nZ7%;T%IM80b*jkr^NW)Zi%$-6#L* z0FHuL(zEUo`T_h36c-)5U(t{6&H*3+uvlvWsC1OzF^|@oE!z=I&irK4m<;yESzFA5X!IJ9#e_X zRmYXP%6d&2kK_9Co}?EocF=tQ>Fn7iUx@)7rkghxQ;>)?(I`;H0;JuNCOVVN$*R&b zfDQw9NsC~bx@r94#%fHHQIVH-!U~|xjRwdOSVu$TNwZIX2fBx=^cTQ0xYfp+X{yja zO1Hipi?oP4UdQ)pWJtFI>?UQncm*b+eMm%Om(KUwrdI96YgVnQ0tj*246=MSO;!px=`)rre&*B_4PTnlM(p=d3S zKtUW-*(1;(11~gb%}7lE@z9lVPeoWH-)XP_#*k)fkMTa> zC)!_nSyf!h%Xk6FR%tDm3XlYW#TTv2?=kx2Q&+G2>xBHG0%bhHw~g^Ky4Jh+C8_Eu)!1Fo}RRYP#V& zb}6O1um7c>wbvLKcme#Aexov}- z_6ubf5XPR3&$p^$-mRO2(RRpS^Q7I&Jc?`XNdz;yi~tMyn#ICH}yuf$t_^ShgoJQHVX(R#6_ zEHfx+>gMX}&E*$m%Pz{lyD01QVCphs2W95Mg*P%p6J?(O-Ce(VXE(1eyrdrIZc+it zRwI_bT|Xu_X1<%SV{p6KDJFzPwrPwApsWR{u^vd>+nogdYC&kbYk7wYQ6|A#{Na= zErKi>*9h5$dPO@P=QhcEh&}cA^8UDt(o(Q$i^v?r%0{Tg)(Y2idDZ>!!bet~)%v3G zaYEFg8V+(gD3qNnKD+hw#1xL?%u6bRXJ9-3xkj0#|LF3bL@+|+z;n}#T4*@*t#TB!Z|(sZB#9;!q-r@@ylZ#QP|x#Fe6dyTY?y3yWsE>3pNZdKd}$mR;Sy4{ zWwC>sQ2g#G?|J(ux`AudoB|q1c40^%&hKlIb_E@+K<8wMKd3&uPUBRI>g#@$=bWTx zhTXV6Q?l}Uw|Ih!yyzgtbL2Z@F42WUu`DQH`>Y683a#Jfa!V9vjfv72!0k9$4)*P& zN_&>x)HU;1c<}VXqv5^SGt$ZXU94{9vgmwlqOE%J-0f(DXi6|i2sB}DPHJakAJ)K0 zoI5+fL$NM8z|(T;r50>RftHt1ZEulCz3!E_BSW_`%pe1ZQ^z$4d{5QKnETfZk$x() zLtozrIS6flrv+Kig}X1%Jthww-DKmblP0NE2wg`il+x{|NB}(;p{uJaobX^f(aaGA z5!%{y9x%=6h35+;cAm_z+8xIClzTkt{P8^A;es;AGV-<38Iql)J_;0bkz{S?O(&x( zbTB~T?Z$+)TM1lLkXZ+8#RnsR?5R|H0M`{>lC+Sx_CzO)g+T=->~Zrx`U}J?X_e3Y zJqMH;ybZ=-DIZi(dJ!=PLadjV`wl9A$Y21L0o86)y>f(~RN>)Vtc|UV|NQ2(Z3JXo z12IEXNpdla7!bQ%ZRq3>U&K4lo>f(K)&!w3TNAAwm*Ht2y%*OCFb(SY!;>>9SH4*cfKx!UCMkS!h_(m)N9O+74V+BfNHtxaD{3 za(i-pJnmn>`b&xx(I@>#ECQA}Xzx<)T0q8@tf{67PPZIb)0iw&*4fnO;FEj4_rT#s z0BonGZ-VKdJQ^QQHC#vfZ`>rqWGv)yyryytS zTkF^)9))^#!<{?#xrCI*G7g8l>C@|U97-4q9rWRoi&zS?n(Fr*JA6?p3IX{B4Zd)L z2n%cygwTng5nfNO+XF5h^&-thhET`_WI4o%&Ql67O)?Sdo8$sf=2uJ~P9oY!6ng<> zUErCZjFWW7(`U~Xz;=dlij|Y|xTWALzRbxw_O0pKOpa|4aZ*;rMVW}D*0ybS;ZMy% zH(A;6(NGgviDX2n0-2IXO&AIt8ht<*M+krb1H3hidn+=1YV&h!^Ym0x!)2BD%wd#_ z;=V}ViAK**T|{}vXnFjxrB&@uZug`VFf14W6>hc2(0U)rvlx;P&|3J8r^3KFINq16 
zH5N;ur~()`^U{h1D((FCX2Ko!fU~rVti3#Eg>Y>Ows4nesyw6a-_X z?7DRdX!nE@g1O!S#;m6~LoPBYpy6%<7o}y`2f7SVU5zaCxh%4FsNCinyOuM|inB*m z4Oo`_KWW{wroT>nF5X3$e22&*n#1_@E5!?ly7;dD*8OHO)RPWcU1|}`>Xz1wXsKVsT)4?g~&n}=J= zhraU-2oRR?ESi63$dBtkJ~Z{%TxPJ^z!G;#%;N&ok*f8fUiP^m5pLKgv8uWTJYn*4e+?u3~#_X#;2@vbLLh_4%Sa1R2(!>qsPBF zCHywtB+gz|sj1|{7cLFNP_eJTqT^IbPwV@M@IHRmlY4_ zjg)9UrLW?kypw7&m`+IQ2~81&G?2-hOdp<>g}d_0xsQRj(}GsL76v|!;$N)khMlFBXfEI*2A~@D*&M}Qi z<5Zdu0???XkQ+P)W(A$lwo9S-{eD)?vfG2-5Y`N)9(NfKGvRDxH5A$nuLWv#n&VxA zo@^6paOP}5!PT@(ReGa1ynp=&st;&Xo7Tol3s>VQ2+Oj>|IUL&1}8m}MYaZ)HpcTQ z{F8m|T-6vbaw*j0(2Nsh|5Xq2-BYd~vs@#_#FIaD6OM$l?Gjr0o))fjs>9P`fY6F4 zr9T)(8$3r7>ka(k8$rQ$7xD$44mykrOiz-n+_HUaDcHze1PngXDU2~|ifH@z@uR#! zJ7Oyq{_ETM1lEU#KzdmJceKcha8y5E=Jf8#&1bOC%`+s{vYp-CgGWh!eia){@(Q@K z@#znK`^ScVGF(g0KE`!1Sro!Wd<(KoA`0sW;=okK!&A^aRztdPgToL#OSCO}yW;YT$Yrpw$f0vS5Gh$^r}9E`UX|{`_+#dSyLnH(y+gjaYJQPUfFIg*fV)Ygh=ANo z1f_s72bNbL@LZml(J}i`hyW>1*|cf=Q{IAgN^AfQU1bG@S9$Yzau)#Df}X=cZrwT( zJM>XV(nd?`j}mI2+U)*KHIu8f;r36Aet0+jgo>tx3ll8xhBk21iG_0=Bz=LS`8(;` zT6udE7i*MPRP+b?L!bmP_dc|HX%?E4v_>W++AZhk1Q-T{XDXR)F$-&Ne1Kd zg_$Z2#1TNTL>flw7fGjw5VWT4xPa&S7Yu&4ALMT392gvh5ll*e0RoC!XhD&o4?{@* z!cvV=9^xRmgJ^n_BLkP>h5I{=qp}%G(c%)sQ=aaIE}*p1sqFL10630FWk$wbSMZ2c+L!tbhn}Ll>0Vt)XxTvTBa~XkNcz37_FVWkx=SUubwnS z1tO_^pfqd2B5%Qhq^LVcEvUpoEqob*1(3WE2vXDp0QPCW1o!U@n{T(r0mNca3l0cS z2w^e6ynGhw(v%=P^02~g!o0Mj%w1r>tPwTdq4_KKcL$s)ouUH^o-0KW86d_Q44fd0 zD1k5}k{?)T&YWCqo*kOJ4w5r|JFru0*%SmR-DYe^y#eOd2w`wjeXx(1zP*PIz#Y2J~y!Fcq zSbBlpFK(G=8ieNu=veD|fBMyZK44;TvF8AUB zUFQoIbTwri*Sm|dfS4sjNT^IlVF*l%3f=J7H@q}8lrSmBiVi%^WDdZ&>fCy;5+Ma} zq!J!VN2E1UZ7m>}Zmm;wXc#eyqfy7C>%M^&DC<)nrKJT&RS=vB30I_HDiTc!43fwf zL(Y;0TSzRRXeL-;S4q4D3QbogRGu5D4#Msc_Ucuz{aZpv9GAQdynA;Ai^hP`TrXtM zA@$!7)>Z&*fM#=uiY6Zf*dMXr34!v8zd@)oH-Ax&b^hO_=8A!#V<6v9Lh={wJva!+j0@Ty=Z?z~gj;W?N1fXjTH&LnW!sUYbFz?#x)1nxVT0-vL zj5rX49g~& zf}WlpOR|ms2KZN@>C>-0xwt4pd?i%Vd3tV|{)Z0_pxTozCfvfc5i3_Rrtn)geXMM4 zt$ye-O~FM&;xcfveqbhYJ1xd3B~;KM`65*F_7<3CkvKSba5X6W(0%W)k<+L#7L`R1 z!4&hR0^+K|r~Qzai~=@IELparMkP5L0c5rf;4Z8^)-sergT`L-mp?&x0su5z)2O~X z4ft*)DHbiM0P3H>8`~bTcA%5jLQ071^BwE#nvmL{RZOe%myP!Zg596{qB66)beiWo(Rw{5W4;B|;ZX#PBw({Z%neS{08 zqU5UhyMi0Ybm%c^!s8I~J4;ODkT>;VX1v`qixsfy5(=;{NJg{SPK7OC@BTqgdg zZTp4|8|JpyV)WRUF=OyoDE%{-m4XXE zc7>q3x|m(5fZxVFfv{QQ^+mSssZpn%rJSoWE7gx1?CxdXBo2fM(E^XmbUD=YtEx;Z z6j6|ir#ZGJz4t~!f-D;VtJ3F`4{$Qe&doT_bmR?*eJm*>vjLU3yW|~|!s@Rt!nWa5 zd(mC|PWdObg0t1D?BrQ2){GY+XS<{X8Wterr2Ug7IBQX->bKJV><;Wk`ImKtHhI|* zKaHb44>32T?QMJ4E0(lLfH9dey3&SEAZJ^Qf)Ve{n!E)94VfpquD~2Ud;ZzkAV;t> zgn!Ff^8&ubgOr}9;8J=g zDdv(hYWJYf%cqCFw4-8)BxFXE&WRBdM$ma69>~!j<1a{M24TSUHBS6Nv~gx-m3L+4 zqXtV$7&U5C#HNvcsRB#ET(%lPUL^1pB_BC5s=K*S#7>CP%K$Wx(lPZ{MjAX?!mZ`& ztCxl`v`(HnrHBde4HND-xZ!8kebPDB)iol5B!F7)s-CMiyNKQkF0|c{+5}hb9wcFf zeL3C;-y4mvXZsRKf#OfBa31_FI2V*(<}0x24Wptg-5=URftwT~{#_L0ZD~7Y3eTDq zSSU#XM!;C>*!77mS|u1VqnES?P$pKtAPXr+6j%WxA0*OQFp^K>#32JIQ?6SqNTXFy zgHejgRNJOaF1iDOlmE-P@(*3_vfBf8V;C~q7yM5(4s+}uSEwrhO&0{|6q5BEsWJud zNs0b=_VVCAxJCg@NSvqP+*tqQqQs4=ftpPTHboT86bOR)Iv7Q&_dI% zj2xXatZc*?G?OZ=xNg{4PhlCE;=|d6{^-p%Vp{ zlF53khqQ!Jl<@knU7n}gv8H*vZlovYppzK5A3o)IBIN?b=Nk>41bKc0$4yB zY@vWlca*?BdN2a#q?!*z5+MfPs0*N9r-%b!e9%yaU`?e)8H11}76n%k@SSUGI1s3s zET7{V1!1OT6+2u#J?m^rKuPeXI-4wpl{TqRq99DY(xwD)O>lX>o{*`gsv1ag(^#pV zkLMtW3oH0EC|gBD;4HgN$j~gx*rw^~V`yC^@6^cu?ss`3$|*jl>Y|v4hGwt?!Jg)* zxRMG8QpXXQ=2#8Aegi4mT)e!r%RR8pl_65%fK2!q%cx%J>A)Y)Lj+tzm5{q*v7*Rr zx5_5n8RDO-wmBE`UJr+m%k+}H3<~9-y~SYquOV1Uj!g-IfcWK*WfHXSB~?sX+Ca?k zet;;z{KbJ%fg;1LeErxjlN5ats{} zLU~uWixbEAycXNXH*bK4sBqibDTt@7P&h~y=?wecq>kelykwv>lx-Qse|Aoc 
z0%1WvGU0s>x&lbblv2sg@_z=qyK>n>1&C%LE|>?ug3G0VTi95D9a7e@{owY$tgYh- zR3r3-`nSDjYUA4g>;Zp_fz@@PB2?E)h zt9;J3y`-=TN+E!t6B^hg^$q^MzEw!uY!RCv2kYY~3NUD>EWm@+S+OKJh_tDq7Kih4 zKfJrE^fMq8nLxL%rMLm_zm6Q-;7%nhiI8b3ef46}E`aI}$JKspp?UP!u^J@S(r_PJ zT}l_C*i)ynDzZ0{20&Y*7B#7}>y}T8`AN@;he?nHurQZ2QYN>7q`6p`d~M41l(u7O z27!m82OYCFqEOAFtlwY$i3S?iB6=e%Is;)qTd@&-Ogsw(fkdT;4^r~6PUOi)X}bhr zYIwk&?!Wyg?I4?qRS(NK(@Y%vL9u5 zA6&0dX?SfB1E8rb_7OcnviB zqJR8b()yIX{mue7@(KF1Z!anZ!%SgVC@GP0zXfI?CA{l#zx_tz?0XI=w4Kby1$$d- zwY#>G*q8!95zOCsO@_W7Zpe2W4VGIbQydM$?njJnE@Pbye~`l3b9`1*!}TgYv<~e5*KiQRcGE zupfOKE|%Q0PcaNH1hia=g<=TVBEC@96z?CA2yGa7o0O)M7={mnBLpQHy4PyDns`H9+jgs8e$<-5sX40tUtVUnEJ7g%xOZNtlt zFC3!>9@$yS&@N#Pw+pQS#InKn5ds367JG(x*)N{BmZwggx-qMl)w6JB4GdicaL?k- z;0aZSN6d@-7TGkiaSn60HSu-7hm_8V~T9xc!D1#s}>dmS64lcb|GQboE z0fku6f`f@52pHO$B5YbjjJSpj)Ya)0OF;E|rfo*$T3}vVBr=G9ngze9ZeKo)nT^=Tb zT{?W^Y5ha4j>y6K{>|_l`onyC$(16l^F1Kq4>GDFG0&Mux91!Sl8WBMEZ(QqyLJ+?3UPaBwi zd%gxuLsfu>+Kq|){DKh80fD;ziS$Rw^vB9$k1RC9RIjfRMBdY#f7|y?pE&Uhtgb)IKh(q1KSl=tNvGZj{+)jT$dYmuaMWNU-12&V^8M?LTpEVq z_cc3?_?q`W$E^3tWr8m#-4~J35njAFzaPOp#3AslwfvpL#Znj@g54-_->&F?E5!Jc zWpJnEl{U}3Puwn;wSI^q+||oa#W4Z8$#ax-dyp?INmG8V+|b$>xBNQD**V*x6VH$Pn#hZ;g}QT-uqX~EtTK=#BycQ0*H{MjDv}q3Nj4S ze`wnOH}I*bKe?Mkq1rHZ3?u6241Bq=19zkLcd^5?nJvBQHFV487aJEcQ%!SW>9BX{ zWoxYiw@lVSvv;9Pyj&V~dP1WSJHPQjJ;IpV+?u*LIpM9DC21sHKYUGx}=e_D9?`b0YEw9DM ze8WKm=U?Hm3>8>vdF$Y;3{@yri$qgBygy4*k66EX7L<%8&kEbBPYa{z0Hp;Qe5 zLh!Ii(#eM;Aloe%p}+i!K_45^%Pt*?JpX+x%y&&l<;cj{oBgZMkbV+NG_=m^y_yPR z;ch3Bp%n6alUfh}3z-~Ir%;Jtjs{wyP$;Lgb)R%g3Q;a;ed~P|0`*RCIhBOIK8_Jo zzmP=`4emi*vUbtfU6hX2IESe`!D#~(mLVc1sJO-t)GI&kT@^>Lmu~Qq)&x|I-3d`u zGW)<~2+s<9Q-bLfs(>(($jy`p`xd@W5!;mO9g+48BV7(H0rUx+!_qCTCPWfS`7EEa z(vSc*-yI=hfQ25VJe~WJH9VPDcwF7xh45S?+U!R}$?J=tF9r{F6R9TJ973Qd1d37B zcvk9@sr!W)jQU}hfr#@`J z*Wi)wcHPWy2Id%D01gIWUHU=0ic9)!TA7!ZMBFuwLC72)Wymd2e&=p?;eNWnTHIRz z5OQp1qznK8Dy<|_JrIny@!?dMa=Jw|Ji)OoXD*;dOc+VR7b^gjvr{0M)>ao!39 zfZ>nWzrkam-f&iUffEi;pqj`~Hs3tfoOa*6DR3f;_k0g~q9AyOn;)nLY z$zSE@AaO)lTtl<~kOp;|ptlQU%bV&_H}+Psuj@ED5nHCK}Ww{mhDf z!}pH#b6GhfRl{`FL*_AWR3cb?ziI4?y-V>1R-=XpnSov(O$SyJo1Um!81N=Nf3%kL z-MNZH@Bdq_8DKcQ%fxs^(G4N$Zp$~N?1Zc$Eo0#N)m@ReSnKH!O~k|TGZZh38FHr} z&g1^ffa9qeZQ<@Q!{+_!^?VyKvwEk0=yfN?BG&&FJNOMzkk*^(3|z@jLm&O`;xde^ zdH<2-{3OWM@M(gfn>kd6a`Qqs7})^C)9?g+vKK4OX4EvxeWvmKPBQ_0+&Lz&`t`PG*Hs=4gGjl0D`Bo&U$?W zPLef(rgFy)WB|Pb^w5!DGw7q~hTdGBy}~T#mIidl08(e6_2rG3sEM<-9#z=;$u8fz z8)9N45Ib(hrT`Q&q^pLvTZ6VQWC;Q}xCRThcwsy|7rmAU!^fPkLDvIzLeiH=+VMUi=F5q2r3R7_>JK^sf_J=d@JVTar#dlRI~3DoD?( z@riZImoLAZh8_YbaMxf?>@5cS!w6%bA;fcL0~t&`bu!?0h>pBh{nO9iKD`&I?lBm) zelwDZ&caS=*^Hc@?*~dst5PihCSq*^Nj8CtCn;e7TlTSUWFp6^0WTqJZ{h!)!Lh<= zH>rH0VGY@t2#g(Ip;Hr$OgRBT+dWycrf35wK_S9}vabJETJ0Htcjz^OE_}1JFb9|e zj#sS8OY78vkJY@Qk-XlH{CuQU_=fn5Nqb@~msrQuKN$|120F=fz5=djjZXu|5Mc6c zN;O$ukql5|sexPb4Qod*-Hfjokm)GyUJ0;syAlv;9~xppafpq+5`y_*_wGjiZwwIO zj|rpw#snvIn=5f8her0i;0*fZIBfV{lKxxCqL~A1GU41vD!sr9l4WYoJJwko+mxD# z**f?dY>I=qT@2cYw$Q*QWx45dPyuy(g^*fGiz5iJxD^(;JDr$(U?oC~yUl~*mS z(asqFEsChel+aO7hi?BDMQ~W=7RrFMPx^fU{%IVKFm7S$ujmUTM?yv>bx#v*UXb-! 
z0q8;PL_Y84;s6>TAD_+F75AVhNTKD4CG}GwFCoT+H4W*wN_ks@7eVtyqSXj?AU5qj zR*D4YD?r`W!jEW_!1*f3;uD?MYzuF4;Qz*3Kkfj# zsKD!YFq(|^b%A5YX9bLq6GKyqhW~6{N!+{?{n-k^`Vd3fA}koFR`h{c3vsJqm_1Mw|p`|!As>|zj!j7Plct)p&1DOClcXDk=} zQ?|(S*FRc=oW&uWP6eK1asznM;3g6G5h~6!YM5Y`3Y}ztcF|8h51bX!`#)fg1_*dH zIzl;+wIxcN!A0aGXt{;}*srE<$4GXGZE(UnFkJ1!scNaOuQz=t;jrK->7)?P%@MmJ zel!0nP1L|2rIg_x3?th37XG=Rt8zDG?(lP)yzlzRJJ7=c1ReUyX&`TLS#c*x4V@hu zG>Ob0J_OPG4C;ge-vmM}f!{;?GK@q({Z|$-W7DN70r1-D<91_*oQtF`W3>FL`QfY5 z_A53;nr6SsT@5wETlaE|%z{i)$MC<|ICj7T;J)GG!&bhCVeQ|pUb(XAO9!wFu>V%F zNYNOB*t=JQA_LtcF(yc>{F9f0RtKWH7!wH&C{;7&PXob(4pFV@r<2Bx8N)Yuau@;^ z6o+)=0?Ry+z(KwtCcgy|T@{tW3tgTKzMvo_cQ4-)x(zdA! z0;3y2MHBu<_9ow_;z0kT-?KZ2M8!EMYU8_-!^5?AUBg~YN1o=ux5|o>UqzouW z^mN~d7hLDiDZf!@-OIbJN0?l%A2t;kk}nc!pa;_aIhe%n(9bQa2|xYZ&1Wf#d>P7H zue?6^Nif;5B3%AZX+(tFqxQv%W=M9aj!rbgL^)~4PT1J((dsOaGj6-d{^CP5LpUS+ z$^;hkca0euWDX$y!(F2x3F{7>LfHd5__riP*xe%=TM`vkyGcMpKoN}~YAA>Xuk}@; z_!W_-2=w;=ibydgS_0w3ozQ-?2Ff-Fe3b`bE>KtNWu4j(X8^i1h%ew6ICSjg=GJza zo7fx3 z9b_-9@^`52vg``m{oa2?BO&qKp$pJ##QL9HDme3wWjDowR$nMpHtSVO1%UEo_FxTY zbT_P5!pv#I4P`u zH%oc@UvI%ELumTKbuFLXmmbthZw2S5%~Ub+_*n1nM3W`Wqg)&Bldn5oN)}DGV}C}6 zZLU^E2Zh8xY>STx)F}6~x#r&J-iqYzO7EB07-vIM#39@tD^N&Cuc8AgU_+#teN zeLDlSy=N_WVDGhE3W#Y9i#48yw|AuyHxJlMh*e=m2c7g%g2+RK;YYiQg|sLDnr%_f zt$88Kh1&9#m16%RbJoElqbiRr6Bmb4Z|-i-Wy_ZBY%F^jA9`U`MX(fx^uH{~!A%n{ zH`wMAR@7RO4`mSp4tT?QxiIiS(8stcN@}2M6&s-Xpa$%e^yNi^ajK&6ZFj5`xQ-13 z1mL&?c453!tP4_sX~^A@TO1)8??$N=V%Uf+8)zZ-E{B*Df`_7EYr{|yoRDlq76Wz-??#0SJkH7u z@lHfZdha_3o+ziLs~@XifRTy(j)dC4*{DL5He>!5SdwMo`Pk>{G;wH8AnCt+&LP|XyOb%7 z5R=;)5<=p{W0WY%E)V}ffdHvHcn(2Hl4fg3;RE_FW?|9Gp>LiJ4jA&RFl3>W^AhV| z`PuQ;kbGv_sU=(5WVaSOj=3wj{}rL*`+WblDf!zI2ZtERHg%P z;DZ?lRlb;s^+R(;w|pKAsgfciz=ZevLK9d!4h`%EOyYjxxr0G7Dd1W-n|ys7+fCx| z+sUwxq|AcBX9DYX9fmbXU=K2*RF_kcMBxjZsRCps5JAgAH#szaEefiI3g4S2US|=; z5ZUlLS_>bAg(91-hC)#;>4p~1gY_5-xkfXD^>iMM!pIW**n9X!BwGI~eOVff?dmzc z-WEzx;~BOeJ3DRudxs7qk7qZJ9l-4phl8&{el*!(j4X}+A~*l@Tvl&0+LBF&aCtyN z=)lpctMiZl5^Mdn7WL^JX$+|u_&j_8bpRz%R}Gttl-y9OtV0o?@*ljF>3FIbOsYma z!kBc*1C)R8jl?WG{v(@sa-N=5+WY&@5=PK;(>M-yfKQY}f`4=#S{9ot4klZ^vPW-! z9-(5ysm8k_cfcwL2;Ke+@sdGc-Lt2;KqmV29PFywaP)y#DP^`8(Lm~e>NwxiXwudj z$_5zbkcn^Ix)8ed7;Rb!QRE`gJO2v%##|*S;KhKkrLC*`$;_T~qrp_x1SRa!`SX6j zeTfH5YCyofsAGg;nBqw`c<&%BkisQm(>mg6e!7=p8u}j-(PJ@Vsj8-ygG9wkb9)n>LG*IyVUi74G$lMBL!tfE z1(BZcb5r-!hoQoTy)hUxRQFhcgr#c)_)3jB|$45tN?3qGxJ7A!uN)*QHqv z1OxT7);JD|MQ!>x0G>j?v0U~_IKTn|W}pWOLb$;k;elmUGar+XB*Mwm1d(!%UA-|8P2qI^ZAXvid?;L{mmvtj)HWC*_wFyMcWv!3H!xP~JxIOy(-%`8=eshrT;jV?n zY>p=mJ4f&N(34%PDhY^sT|?)5D)Ft{Z5;x8H=?cO0#vQaA-yE4`freZm~@! 
zd*mI$VZgX-F{4)lsbP{Zz|Wx4u!Rm{*7E4@D`CZ9GouW|7c(;7BYP>NF1Q*%ai+lI zX*h%ADPYJ$d=M0h2tn%!iZ}ld0O<%dEG&0KJ1I6F4_dp~vHP3;MimjQMjL_7r0~4m zkJjKdv0UaofW#g_c_j1!tHQ(PYe+b%cMVoWT9V3_kHj`~P7Z zJoNwJC%c9*yYv{0OaE>HfISVoE&3P{FMhAFbZqk!PB{}cmHnFRs0CD4|Cd=sV!8e= z!V14}Qks+4f3J+6ZuW-{t-1rBCiSgQ^=IcaD7aJyyjy-=VNYN%(*F;;%lf~mX!Smh z5!5a=w=kRz{c*DW{T;g_?3b1rKe|T!aIb?lgHj~+uUN2b8kNz%V!jrSJK2BhjJ|t^ zCxF)0BfWmafK=VxxBsROfCjbN#Q!g&_&-dL|715tqwW80N3&P)Ke()na{t@h{SUHD zJUS$v#(s(4d;Tvqd#zcXP81jF9XLpWi3D_DfAEqxCPfA-|CnQJ9m#f9A+h!Ui^0l| ztKo4DH)GMMpT^D2dhJVK((r?e3EMm3!VfIboLXLuc`z0Pd4~ zr;bjUadhalqkI`(>P9)vR$a}>eDRvOT|X`PX|mC)h5M8|>=kFXIDU~I=;7G$8Dw%W zoS(LT%?w=X>;VnL`9hqr;~l1~zTR_$`PFIWsqRtJF5q9CN0?>8`NufNJ8vzuSbw^D zbWypaR*<2>AiG)Zi#(V047pgqTuc=c5Lt8K`|7s^q?*G(CSOggrqQ!?Db+oLIObFYchHbiKFYeoYX!h*kyY8@i$ILLI-L z1PI3DZ%UgYlwMKHU%*WhI|jrfW(LYUn~K*Hh`ZF-EOxH{2`++A_mekQU`&dVzxhf_ z6VeO$GX@+S98|er{|wIZ^A4b&86Nc!!+{Hg$vR~0bhX#(;h6xm zz2=yJzJ-$S!h@0EXz(~lCzhwapP;Z~hbJ$WzVwtSi*Rp&Pn|ukk2>1FuG4!Si#GNz z??>tksYApEfp`Z~6U8$L&MH3iMn}fpUZ(3Z+I$aCbm%@rAH@TMKw`|shK6)dTCP?q z0rSu&4-8fSD9$_azK_X|m~z4}Md$H2u60|8Nd-`94Hmu1ukQXDV?8IS*R|Qb^;>*T zS}M(a>C&YHcvm*T45cEZBNo6g|HO$`DwSrWWWey&+I+a~nL@|x?7Z-p>X}7J=s$Ux z32}b;<@3ciJb?zFDK^jKjf-mNXJJv4G)(^(9uxNLF1 zsWQzBB#?HX1~%QD`yuk9hUyqq-VJ2t&!t={SOMS=NJ0uZu1HlFWVAG-d{Mh4j%$A9SD2z5QUq&Os*V??!9Fr3D9+M*$mliJ|8 zXpu7-{BVXob0!lD)q2;uYfHG1TB@>u&Un?%I*Ri3K(lJ?AInr!%46h=KtVw$okFF? z>*n$JXt=X817ZRd@(H&~J)bymW=GqU3)cDG&y1M*Im2?!3~l{2_9>O`DiDWrZW^>- zwUN==SgdqNKC(@$Gy-u_46YHNLTa zb39FEveesI*|OU6l6Sa&HIjV&5l$T$E5Gb7=~>IYaM}uAMCj^S$u`j}i`5=IJ=G(- z3d@trs<;+-gngQoTz39r!bPqg<>($y^af^72(Vs@M(wsP?=Q`xnqnN&t_7}j-g;ki zVT~tG`@WK%>DI|*Ma<-~Q%=*@DnV@JnQp1X%icdKlJ*KF1j|nbiwa+y-tDc}>?xDb zT_VB$-|Ldg;&{Dg3AO|p>9#dlObi*S(-QFarC~O7L*B`cEG9K0g71kIV{d;? zHWRjEpX|MS(+#4{_q8dJenj=@yviL3PK5)j&soBq5ZEb-=)P2Hh3Q5MCf!P*X+<=W zHsO3}ir81Ue+4)nX^{o6`KGoF6Ep#H2uHI*9UVbDf-p$AeLMt32oZ0yM;s)2M^hUV z^25i6E(N#r0i3y+Y-%Ui$GQ>cmf~ml1q5#5lO}NX)ydDv3tV)owe1X!;4M3jC+t)h zlsLY9QQh*MA$ykhR2EeAAZ$BdPaZmR{!m`t_?mg}jv*rx$DXczJ2cR4+$hgu=;0az zDtAh5r9rIStTcy}tb_!o$V%{R+a_}FZL6%@m-WuK4P+|4LjzS@R8IDZwnTG#dwWu# zu57|xVzy>p-aL=+;qG~zSEAdR_B!crmKGPU>VItNjfM84xA{>m^rgLB*J5lj+@TxNWbb_p;$8E7 zd=+dZ^R_1LKjrGWPboF+TVl4{xC#5D6?vns8eLFtpX0gcSFv-;pbfLu#n_l=hiJWQ zQ$%RW>%bDRR8qpbieGLfr0kgA7PaIUPl#@`Rb)%3Rxr!_%5$USFvpV1kbp$bWq~i| z{<^dyJMA%o=X(P1-HdutlmY zg@Rs1%o-5ZdE(N*f_nM!W6#eQGux@#zOt0Ca)6iekfEW`QOBRM|FV&J+!aCr+qD<(*(mC9eUrENki0DjBFj+{z zMoM`6`L&gwY`pJh(s4I#^k_fCvj39fMZZH1a)UruQYLC^s_F*SmaP_VW5 zkq>#)e#1*n@9Fy3Bk!vWz66BgR3gp62yZ3_BV0skF1jO^rJgImSEaHPTr}z&tvhhw z+t_r-f>g0n1Yvor07>l-o1`wzHbm5b^#R^d*0Jg;Fd?~t0>wGr*kxkk@NAa2&3?vRAaVmIJn686Xdjz+wa z_E6$B3g+S5R-?I?M*dQ803{I6i9|@uPgt|Lq#Luc(0?YsfoQe#p?vEzHOt{qaXH%6 zeUR=985uQ}7EH^pb37w)kfU&1t_LFwA}S(zk=kLp@(6~i z=<}*WqcRIXy?xyy`?kg_$mocb+n3cV>~9QMUHbf6dW)iG0(ggstCG%Q&h=s};`{-7V`+_1n+BgP}{bl&#OVoY+@uLBT z`_}_u)cZDpKHqp4H`;vfcJsi#QrB$t{H{>Q5MRZY*dGBHMH)YpFn0$3FXa>WOh(f4 ze_NPId@pC&Un)LN!6WQe>J6!@NPjKHs1DXJQJPub#Xd`e&6!Pos3a zuMB*FYOtG<^Dp@5qPu@wewj4NlZrHa-fQDXuTD9BslN|{C{1LpkfwGT{Vl!Cj^+)zbl`%@GR?p_zXKf9z(|Ib11Zt_IXlu z0I3x8h)onm*h&1dTE}wh|A+kx`u|Wb{_6$(!>ytwXE5acZM}yD;`dID$}J~d6B(59)C(s9(&H;w*r$KHY|>F19q7=WPr)7JNR&tfJp85x`_+=gTIzoq z3`L$_KdAUX?0^$Utl1SWs;b{syjuqS^jnjbN3HckzE4NzUTi&p3_1UO!BX}=CbKBf z3%od07J)MJM%#(~@-_e8*R9-+n^*tM%VIA+ev|2R^Ez~({%+4w1i2U7oBJ;A&3$_x zbjyaXa}kd{GpAuj__TQ4PG|YDDTGI5 zpbE2X6wQ45c*H-&mw{JHF=OBl|NT8b-6%nrnaGeW1b7XgVaiRCj@{R*N1v_Mjkzak z()H!WagkY-uj>WYoG6ND?LO9vc@)WDSMg1%&0I^*)J5S1jiJ~tUffLUGY=!{>>BW1rlW^RMLB2!TUtCF)jAAFBLOJ 
zSKK904(G>ZS%_A6O`wMpJMKGFh!dI}+Ij&$@p)5cw>6Bq-}|z^<1_SpLiFQefIKGs zGr0Km+i;CN%EuQy73{siUd)mf0KFd~=%PJAArQyP6TXv(kcOcT2*~`WuY!NvTtY(I zhV1*>KJ8y4KZVI#10!tCle9ZKRWkn^Z;kjXT7ADjb>xbzIhTuNukb!D)gU1B)>>ur z@2}#=D+Onf9XEQ0?CTAZUT#}|^|RksUDR;1pU_e^+*;wqHGbxy-bFG1&9b)}r#IcU@+b^)Fr)Y5ua)8!TV;&Wh^_34Lf_i}dBpg(H3YsDCXWy}(+y z0f3a9-QYWZNMvTFj*dnxoe6nCGDSg$xEi=5dfh0_BCzc_b0xWmJ+LfYUfBd*u%2ZHaXrMvSF&ty3_^AN{f&ml z3yyXSlFXm}7ANA?-nib|XXwimrx7F%fOalUd%iAqt3s3sR#Yyab1}V%#n~XfOYIg$ zMGD%Vg0F$TA{S4nb@i(nMCtE`%a8GfqAKe5UF^IGw?!AqD=1fFpASjBwsw7o{ZTXm z-X^D@@IlOS;EwpRvO@*yd0_Wxqr^LWc#6(d6e|kC0+9@Nh@`)J_s-sFpkPSMR%aX$ zt%f6EYBJ#eH#%2=3dsu#>`g5KSQD|kp#5~7Jqk2=eKTm_HCbCv3KPOTv^UqPvcR3N z0k2muPb)}$F|tXP%*c+^&*KqKsTgBwBIg0_Y`tqNW<7ZLC0DYL23jvXj?YqgiZKz$ zJ-xD>lsxNL$Dj&@L9JfiVf-c@F6<+>WV2H8yc72|A1Fb7L3QaV>#M3Q9%q|QzgCoG z)_fMYZWwCgVxDu;H>da(A2TDaJ6d3wPC!z%m-jWm-q?GUF>iJVxbjzd)_6!>EuF2} zK8wGijt@DgXp3Nva#!2ObP1^^ROPg+>vDWj8bz4n!lQ{kn989j~RI^N$VTd(GA>soNR z;)U{_l!eur-C!u2EV)^Xzt?=+`-{xNUoWiA;Eu8?e^u=rIO@P_df?kKl&==nxJBy# zfv&XF?T!i%UZ>INonMV~L~Oso7ogH0aAv$8a^Tf0rr2~m#*#`|{kD>CYjvWd;$(#d zKD9tXfxBa&RJh|~#}-YdPPAcyxlcf-?wy{V8+r@ZtMkr#;6wmo+FQDa*kk$0yn8Fg_*2<1XESjS}CZ`ko2`X3w2u$@BfOjP&kvr&u%CWj}<Dv~&bi4LGkQ^Q@O0q2{~<1 zh`=h7$Ht~n2SHD3jD^5IB{jVljniu}ODkM|VPz>nxFRB@A?9cRezsrzz3%1{;KR2d zwzze6b+MRrT4wJ0+_KE54;g|VzlB0y1T(ZvPcb*ojy!f+`vvBBo$wSH6_MG>^?4X=;74fO5$%T zvA6MV1!0n#hq#8Ag83h~^pJo&&Sug0PD9>*TDtM)JTjAeYX2X4E9a!;-z)Kd z*vA9h0VK2H^VnU}e@-U*IR^jta&~BMu_gGM$Hz`2`6abxq7wMs&;494Azd9{H~eGv z`5oLEcsl<&>F(`U6*>I4VgsKpo;4bJqL`XdWr3X?h?HZZ%h1B(|B(9k22YFW%YP~u z`mg18=-qD$K-B&H|NOL|dbed_$59+NkSSDBQlh|KZJ|D5o3Sw|pR=@XZ<^cIs_uK@ z;6Ekny^pI+13%<|?V1}a%wst7Iu3?cuU`EnG=SQOFAvcP4?B7Cq*?)l0O>xegEOk*vkv`S?)`om@Q}E7hMl&qLKtFl$%^t(Z$EO~e zAJduaZoF|5WON?yEHcOZ(6){4?9SxK79uJP%>+nNgV_pi8IR+m^RCQgKi|3fEz5#9^uBjjNIPQx>!JnYzf*}$kHOBpG}ONB@gYJIeQcA(^#1?1+L{LIr6f2eZ*7GC<_;7_Ib-wRcSdIr@MGki0{^cFm{@6E^e z7YmLTJ7$bSL}4hyXA6e3cei;Vo5zTX>ppE*uGqeeihS%tn^4(#(hXuW!HceP~@a zx9vpiTVOk>hcq!Y(j z;$_;r#B!_)9;^!qENSt-+Ad$2i46_)cPqy>HyOQ65M|SW#0@7G7Yy~TW73(5=aQr| zsj>+u>0ae}$PC|)H`}iLP~NdKirk68?PqG@0LcbZNl-Z+lwt@H|H*7@Y00G$_w8GC zkLRu|a}ZNNjlvizv7I67f^TRBQHT!15YVk%%!zG<77J_Tt5z9jRU>xgpvq^nXFPsSR9Ne5x-%$ZX&m;kO)EVyB^1&WX+livXJgeuGw#PTAW-x;}5D;Hxr z$3boI2MQBIKTpQ>07K3jZ-g6oI9bQpKRkE#Eb`tsQKqnc{UPXS7Kh9%zD5_;HYAfM zaMV64lAvr%Z73{?nW_z-7_cdIE33v$H?cIY#Y5b0S0U)IC6&gh(A=tK?cs_BM z8=!TFMtUendNDIK?WXpHp|L0m81=kZx0!$wv_iBb;ApnM0ED#=@C(2uS_rPiP6U!o z_qLrnBXVpysCGVTUUJv|GJZv~xNZ8X&+bW+9xYOAA_~DBN>5NiG5s+2a8V{Y?NaqG zjnF^hqlUXgsT^k z<~)Ytmw-yVyeDn@>753wG-s&8Stca}oA93bW^#5oGxg|npe(*XuK@22&yI&VBmNhX zd8v{YMI-y(1!h!nXrK>!W=RA&BJ9e`p+Tl+7amW#1u;E$V;Y&@-d($MM_wfPehMrb zvX*u%0;MH7fXSk(HtX@hXV2lbjagwH`gyPo z`+vSfaD37*B+a`ooIgK>8H3$>?Zb32F(wN!a~*w9u5r4OQapZ@a-m=c2hVjR`M7zM zQsP<%S3E+>!Ed~weoWt?0Mj3LG73tducjXtnKrHB%7^!ndY6h5Rr`W@@9O|p;gWgY z2UwXNa!TA`Y;1zFkre<8Cu)j5)w>5;)kfU3w&nAKdayC4U)Oa|V+ciKVhdRA-Edu-5Y)fpqbNmj^u{iRSQwAX2* z5x#`x%8M5FKi&=VjDk6$9uwye#|Q3Zqx%gKih;tBTX^^$A9IjinxW)CK8DCHQ9P2) zgST>-bLR(`gnWr@QmbT^fg?;)u^XXU_2&MY$47>c0w|3_OatWy^sdPP(+GRI1 zFA^px`%DmeSu!(B3$dZeJOxOyCbl{M`0+t_E=kXy!>(%yc^lx7Y|D$h-w-#FQsxF^L3Cj6l&DTN-XFb4h{0K&{ zQ4S=}SuK7CSRx#=v*kIVG82Ni@mr8SxN4@P7iF=SVi=fhUhyO9mXtDE`oS8 z;O_@J)2GVV`@QX3vyr~dNVc&XlPm9@2@F#j%-8=?eKDS2hR+|QLr;5<^3iAn?KH$f$gs5Ma7BNHKW z$P~F78+UNH3#=*btVoS`k#QPWGKx@#U{WrOH2+=bgo%8^Y_Y&0CLFDmZn= zl7s|Id}n^lbSBt~{O({Yp(iM$DAXH`KEUSSx>IMV@M#y9)Nk$O(XEwr6}U!{X4t~B 
zSNA@|hZ80$JH}$nyxe}qL&y3?!CN?B0@b`jAgc68Y`L9G-zMauO(=+00rks}L!{HJ97gaY@e^sE5xE1gb!xAjn2h7tEr0xfQEZWhJYL7_o>v~SdlUrw4$^CKDtqr5b{(zrQzmvhRyN9evl|_Xobob&DKf7Xp759;&vMc*)In1d}W#zdn;`qkSaZbXgg%ovcpZ823Z5YjGJZ@kV!??OIvIkDvCo&Xb0r5{pVgg@C<$OoAirGP>d^_aL>Q6*SxeW&x=V7970OEE2}>oMD#<^@Hq(_}_7vyigJsfJV}i2xS0M z*22zxg`ox!`x$(L44m1I0z}qOs(BN&<#@vL9fpl%V{zTx-2lMm_e}(-u3hUPifZD6 zg=qb9GNBhWFqpRIa7SOpa*0ZhKM0Cs@{AZUM{roI22ev$=nB1{53ol!i3>`9=sP?T zdb%Hp5(KxM>yVBDDi2ZcBNq227#bz>#xTT|K&U`87{QT%IOgirS?H4BQ6a*uIdNob zs^zt*^FgMZxrCPfv^bwXjkT=b@7F6#|mPCsZDWFQBhLyPV&hf#62gm3;Y6fn1X|z96fPm zmeF#bI(%i*={ked3r_H3@rT|QE)qm^J*_QImPP=M%MFe}a)pX3!mgoYN!=s(LF8gz zbM$3D*yc_4dr;}L*E#iE4etuLc@xUR)GI{Zf?Ginh+9$20v)u;z7F_pI#y>>gfHwu zz`3Z#B1!G61)o>r*sxJ5eF@r*_b#WhIs8t-uB^PZ@lnoPLDX?dU0>JX#A&`_bhOe+ zySuHUqZa#PJ#xfwc?7K$-TcjIp*Z$8V_8aPI6P_UJLLF{dG)&&>I2MnK-X(^l8cmN zjndonc^GX$5hj%BTzs0&0(x$=pTrv5OQb#tTsPhbkB2!)vSq3ol?FIhi4SVoFj9v` zCQ3z{aNXE5NVS0@KhbVM5s%VM8{|>=u^W)?F^U>JzFCSR`yP`$rqkNEINdpDxEMOQs9oaq<3o&gDF`?a!O~8{4e0s>E4d8w=QnHn- z=Pp8Kaue+)^_`RigNrS}uWiPPL+NQ ztUIiC6a7b14Q1vSwBejpqe@any^uvN8&wiN^^5irc3O^BUW3E1{stU=HlHXmWvz`z zBmkc1N*|dOc7=1qi&4D^#~htCi-(k*2o3b(Y9ye_lJ_jj8T2T=U=*d%owEVb=yvyW z9h_NcV-36m#nZ0U1Ao9k#j@}1vHv6I<^S@2{u_cW1BKL4WQ9!AY6$wrx4-thhP-@S z2!dy=cL(lZ(aU-uSXDU91D`S)%K;ujTkr&=OSUTT*-hz#f@5=$Y1x!K8%D$koJrIK zt3F>;?$}l?00p;w?I;wqZmi-*l-33ZeU8w$Rj^c=i4zAIKHF4U(e4iW{Nt7_evv@Z zJ4iJoEjr+}PGUSB0F4iP4H`am5F_!CUaXx4M3P?a<+TFZQgdj?WOYMBzv_DRC8!lK z&_F>p3vzF;+!&Ji=i@W)*!Alw7D(tf2~UkFz_yzU(mwD)c>`!*yXZT%8AI;#{fxU$ zny?;<_Tu<`3xT#kkov_TZ{xE2#l2gC`wn`6BZG3&U6hJm%x_v8j1_N#Al^+VG^w#* z7qJxb6MyA?CO-#<#+TFO_3MC_keet0QFGMnrmf(3^7x)K3AqBg^4y)!wHNKmiD!9z zTsX&{z6i}}l63~Qkd5aF1t_1oNF-}ETegY6M6y=dn07~66-bSBTV>*X!G#C3@Rrcz zhZi7Tn)1o+E`6wD2pnfEtfjK6uX^ zFm|Y1Kq!M$#sSDQTJ5@zT!5q)s9o!uGM0LmC`-wdI$CO*-OR8khD9i3U^bFc9*KZT zBzxrhY6okokn=PIQMBXj_cwnj*o?`cFGKk!Z^IEu$P`c;xG-B4Y%h%6-;SwGYll5R z$RrKo?XSs9<|0BpARC*`R!gQwdmI^V;N}S6qQeKUUI==)$3?iUVmhbs``tke7>J`x z6$7OJe_1^Fh4-X+3}0jdy(-99fiCJ|YThWITEo{O{8|@mN2sQiE1mMna0CIU#vv(ghbvtOzPJWdDyf8qqA?z_9OrY-5x9CC587LP z1K@3Fnv&%0gb5Q4)CAvru;uAsxPC;7;4$T39nC|ELvrZ+KwLX54UlMng06eF0Dpxj zm+Gq=j{AHhqGjhv&yz*a9w}$Z7pA>>Wu>2mmyLJxmPSy5;cT3GF`djr(H`D?Zj#Ry zXwZGYF-O9{^tSM{kyb^TQ;J0hWef!UNEZ?cB&(RrSwepm(Lg10E6+5(4@l_4H}8ql z9zIU`Ef$(rm%-v5uCM^VXsx#`hn~EH<$HkY$7`r(UN<z2*?Op#v4>fy zAqP;zqqGZPGttM841(4`svr<2^e0siz=AB9H$x*Z>bCFr$Wck?p6$MX391#7_b`T0EvJZBk^xIKG5 zinQv`1bh`~-i7;vMq||Gt;Z`%&T=_#e(kGlFnz7hCF6}T5V!dX_`KqgIZAFDT!}a| zk}$z$x#d#57DSV1_E@4qM%bsv)u~e?Z&WvQ^H(6eWn!x40qP5#6Fjo&xD)cCKLAINsf!n{b79_~h+qF6h`hVR#w-j2@)J zM*+G0;h- z(=^3=Tx12)w*9LK^Obd+eDa#TfHg4bzbCSVGzZWZAaB9ctCtU%q1orW*&c^3D**6$ z4Y?Q}%IhhYbxJ4f5J#(k<+l34IN{FQn5b3f%T+9w)d|yec_h`<*-6t&^hRM)X=&nB zuY_eyV|bAElqGF`+5s@0DDm1|4NIqN7%_yC$19z;u%5K?UNYo-0BA)OF)l}dC}Xi- zyQzLp`pg1Z4RM#?PzMJGcs9i9-c01sB|%!lv_pcMk~?5$;7^l}l#|J>e=i!_Fh7?FiK=_}NYXk?6h0%zBueg=+8NaYTWT?l?(PrT69ug&4xTR@du z9J){Riyr$gVU~|SR1oiQ@U+3qtvEpGWFr6)z_&p}Yq{Y@_i1Z=Zo$?Km9q&hO2&oH z=Y^$yvFCv%D~zz`;2iU+d=VY28+|W8)$ABY*jsF6xJfTvu< zSHK$>}rw17!+4<|ql5?4JwF-2_*H}MGzhx@N^t+3mJ(la%AzxNTj@k2z5(UES;B_lNtqOt0*5%I=SJN>FtP&HWWFJ9^QlqpkBjd)^lSW&&-n+g3 z^Bq_rRF=jzzOZb%>wkW*?(i^ohQ%4w@Q_^Hv3XG36v1O>Vgv7Rg_Xg+I2Um%!YG*+ zqHNnIl}Es703ZDluV`Cyi47fvjpWlO_DZNucn*0XBYKyQkJvyNG?Klm^UO2w^hEU( zB%J3%*y|WDw9s}X`kL-&Aw@*o&!S8SnzNNU!jqpp ztAYyaC7a+Vw_MIjOq5Es4e>a6`g8`GkwI8BRiqPw5&a;@?N<(~pxy}zQ8U08G{la= zJ%HXIwkS)c*_E<0px3J>4?cYty?;;@?(H3UHtKU^9=amvj;}t!DO>KTAoFk|qY=-z z`$eR%-xO*%Fg4Ni@HtQ;aU&pLB@03=*D&*nA{Ajq7178j>@&o-MNrrSR(d3{)$UB6 z6Bj!h3S2XZda(o(TGu=!3hf@Jj;qvtrY;+%oOAG9Vc7OBBhArfrIC9|pR1QHo<+?a 
zd0}NhEEI|9BSR+JEkZ(=n*KhTzjd9a=2vK2psNPDV6OL;r%Pv{5Z;XL^>Vb>uTqm5 z%YNKWNiXzSCI0-g`1HOL44uF1>J1$ro=C|Lxtx+D9kC8kDq(;d+b|0f?z%SxlUkwAm1*l8dvATq-vqdqDT-PbPWg z0UoLK;aYU65ySWkZr*8Y8oD7dn_3?flnE>)om`}?1Y^>aC`;;b&gH{H0kF;;`zy@b;Uma%d1a)U9JG{Ji%YIEUukJ{Ky;X=kpx15* z8pkvHhiA};#{nu^ogpA9pr$?^8#`OMyTNr==Db@FXNjHyW`s7$&Q^Z-29Bqd(smzf z-81NkngW|Mg=NLG)t-LHC@S^b!3jdIE**9@K{%~QO+rV=@*Ru-$#*u=eD>V2jW)?b z`>M#Y`Q#rW;*O6$Q_?VD_VyRa$t%e85L)Sb6h!SV zs_KmqR@{s{ZjM8{4LabfKy^TMNkbd6M}b{Z9DBJsjhqne^Rd7mJ{z8frqFeyHX(Rk zlt>d#p$l(4wds6Oks-l^?6DBcyr9Mrhr*8)fO&RG4b}3{gGF#J$xS zN#+ettYhJEL)#9T!lDu#dhS?+)S{v+0*dXNb%<0S+Hr$!a4g-{@Z^4LdplZWB54x( z9PBp0?wIOnwDGB4K^lrJI-lw`LTUl2dW-Od2hMa5wZQe7ARDCJNKyqzZpm{vzU0y> z%y?vx#W`t7vC2wIFII{R6`2x-K<=Y-6V$Q;vKTsByE{Ji zn4*6RCwH4~PWv53O&1jvN4kS2zYaklJIc~%{)&-nx4q|%=H_99Ee6}`KyyvGplKai zdRSN0;5v}IG0;@5QHp-(C@iIknNwq@V^3Tdhp`cop@01N@w8DKlczv^lERv>0gmt2 z)_L?45ZT<=^7eRzHaed5Q?N(hk7Ip=Gq)7c#m zAdaCakSr|J)_!U&bVLK0kRy@1PZwZ&%je#1YHrrXYaF@aD61;1atccepuk9|s$)Pl zAuz&CZU~1ui&!t~soS@7)WQIC51BC~Dx+7lXklKJ!k&&d?+C_ef8b$EYSYtH7A(KY zP9MuX+zRZ=d;ah{%cok+MtMVKkfgHQeQSQJg_v zuCxqcgaNR0&cprhL{{3K=G5(vP+lIbXJK(?vVHyHh0xxmB6X6;Am{5%iCm+0!izZu z9Jp|k2LkHjf#heBy&!T}cq%lXUN?$Mg~%PpCN{WTMfD}{(y=HGM4ekGBZvd9QxJ&& zegI{Abo8LyXO?3ZA^1^?DW~M<<^t$D!4|PMD+8$voNvPsz}25`+qR85`}jO%FKEjX z!vW2#5KzLv0JFW2?>&ew3pfu!^Fdh0XB7vjU#FZA_f5z?7U&JglBU_LWEg5;ppMMZ zFi=+m!adpmn#cqtb;538%#QHh+&Oc{7mda`#bHh^jHDG&ZBb+ls-reiQ5ycEc72Rp z!*w*?=NcwD+du4ROp&Ez*iB;HaC$Ez?h%vtb>jt|COhnbpYl%;LIX0_e z=5(zje6L_PWh^td9L6N6S2(4j=y%^!*!Sg=`vwj_aqdBbs>p2!n1w&l{|T<71PI-H zPu`J?vq765%5cj>sUS`x*G*H%nRy-f5lg0PoGt`=l4eGciwCV^0I*0H`GP*9KKKe* zHlz;78JEEw(rV55`iayZm|RCG3CcSF4R{i6W*oafZ==Wu&X=dTIg8}p(J2|g2xsqN zL~RE%_`IlXU7;S4!^lJoV^Jzps8Mo=$b+G9rk(C7W61|>Dk|~hi2LBTtOjV>vg0PJ~bD7I|jIyyJexf_j~29_YMrv@u^#@l%6pPFE$0lq^pOXD_-(nFh#M3;!W0nsPw5(|xq zT?xXUEwu31k`vte#Lfer6$BdZdcQ~nE*il>7V-c(dpbvl$Sy^9ps3+@;27W^SGKp{ zf_-WPyb}6=9ccoWV1i$+`k%NNWl?;X6j#iMF$yq&CuT%wFWLIc^&`_$X5{i*bXufw z+SOGDEo}6c{fJ!$V4BU3zf-h@);fMAo4t;i5885tPCX9Pi*3#|$VJ}03pv6C)YVlO zA;dW_M~El(EkFfiR`_A{pl|F-e6nJYr>7@X#V64Z5y4GSV~$wI+!HzaD9t5RMJE3X z!o-)TE>83Xgu`&qiE2r$(5?}hc@D`pC}mm`!_l1$d3 zqxEwDsbOzi!XZyNy%v@l0<9`d5q+iR1`-*{EeN>Iy!Lo_{(;)tA0F)sw-)6sQmah! 
z^{_G7?)}8#0cvI4KU41*zm;%YK*|usiVkt4^XU=?!nNh81tXN8Ezk$ z5$6M-0!>~6AgEZzSAZl0NFIowMtppy;E`H&#!^d2H~9fKtVJ?=@lwkg2Cy^$gDEhS z`HS`wSVn;WILcDhi?@)fG#;%9;>fGluYt+lSdC)u9%xL=40=-NaS|5Ljv$nMLsfv{ zDEFol)~K|?M(|IjwhL8NG`REzpdCzyXJO@@#2f?oN=W^+g1)&B%%n15Tr8iCE+gs< z;vxzBFpmj$CGxEnKYK<%c2&$*5$X9j$lTKcw8$e;?2-~h6DlwH#xz0Gw30=`#RoS=zz{6EI z0JTvOfb(mlAZ>Sw0uN~2D26s)`>^1{tYfoIa}wq9E-sfGyWNdZb5X1&M3gd`<%<%_ z31$fh5UrfqnL-3}oW1v?Q{l}4@$1BtS|@{@QQ5tYCyG6OOA0BU<| zEd<>pp>m6>*EgIPD}EMbMkv5>GVdb9hm5m`w?#NKMQ(hDrlFx>{5OyhYD#t%n^4v% zq-4K=#Fu!nHL(rK9+~5;Lr~q9WGwA`xajn3#zW89J6xJ9k>(R0ar>hxjwnrl1bk!1 z`lGrBjze_3g9JN?ev)b_88Siz?D27UI;Dq9c)12tmRkPoZ@+e0M+`Cnk&Lqe^hOc>)j6NW+IhahyUiC+B7+S@<@i9JCE_zUDw1%mR5 zi?Jn1*B;r^6<%)rFRcxw)GsD0$0P~8uC6ZPkeR*qoV9xY5wnsg4oO8tU1+(3!wrbF z3OGy__5AZksgGa1cu3+T!di|ioNWN(9p}m|?4J#+uSg?^6%1=4S3>ScZyq!nWMYJN z(ZoIsXO_?yv_mWq={BFhu~B1)iAYpv1&8)IXFQ*tM3taN_DyII63mB4y5r!-d(s#H zo|YR4$A}|f-XoI%X7S}X))i8!TS#EK5F%;Lu>;uLT*pp3a6h|>ijTt}=<&hhT-?Lv8IPr^dl2mXGKHG-hXiH33bT8lQxAf}2h5~u6 z_^_K@0E4k=WpcKDHTx8UaY7YF3vL;4iTpph-aIVl{%iNY3Q3`X2!&EqqRdIA1`P_O z%!F=ZqzIW3N)u6LGF2i&nLs;q~uC)Xiz@;+4|IUJmBPmB@Q8JL@KTQd9eJ5`7K#9sm+xspZhvO(8{p>CoVqJKM zwk%qXD=RM_F3~^cm!b~lu8E{sMR7WI=oBEcETlt?ukf06QC(gAPW!a8GQuIl)HbNr zwaKshDtbsTkwv%E`4~UT79E+k%S0Lmvi|%^gPToYL?OJ}P-dH5oiqIcw?~#XaOvd6 zh}KvbQqoCO01@*%iD<`Ji;Q`yDA1`&8;HgAS%;h6)R|bhQA+#$2<9I?-&kNVhOG*+ z>?8BbW8a&f{lqAVIumK?G-N!*hZiNOv_{PAeYpB1Ytw}8g!X&;Pu*fI(H;3KrWAYF zvxf5Tfc_?1n=AZNU=n4e8)Mn-aHu{}N|=yC#NWk569R}FECq#lu`B&cCe*x0eAXyw zc2cqfd<1IWF;EH_E$-D@HXm0RO{2v|N_1KG4h7gOrv8rW0kxtyR+w2?_OmT5-Rc&! z7P#Du5(jx6{pmDo7{|7h}zqd>-C%8I594Df>&t&m=(GZ*S8vVEjn?JngipkMA# zUdu(Yv(EI>dMm{ckGrV31#|LpW0p%AG^~3$jY86wD8Xz;EX%Ki^DI9j1x4Vs4kt|r zm}5@|-lm3ld%9mqtgNcTNoI@20x%RhMeJPn<_XF{9Fe78)FjY`)2oWl`$gy~q6$U1 zM8k3Mh^fq)-9E%QU(zMOK)yYmY0TQPr4Gi$}XdN>gwwzjm&6rPVGY%^E7p%>-#Gd%gO^!*I1Ez%>c_87vE9fjcpd`M&MP>{fGH2F zEJ@6FV5((83|F>K!J>@S{T8EAz*CbrWyw%R7f*%BY%!CEJySlsBJq`Jg^B8be!EO$ z4l{3uZYkdr-@lTzHqY!;1Dnj8d1GGG4{o8++8xc!4?2CJ3<-1JJ4jz<(lWw)Yv)NW z%L@8_zB`l^p{ckSB>{-(_axFq0~*U2>v6M%5{301V1-H7uAcuvIF{<^fZl|Ie|J&3 zkRY^#vMfmq+1pbb^xE3sOIEBOT%1m`!uIII<$K{MZu5%vBTNGcUP0t}(syCx{6F%3 z^^#2ty+o>Vmk>aTV3*UZ{>479RKTKWW5K~Zr z&))Czyy;c`Cq)u~J-^xO+Mcs}lX@Xw5#AF)v%(yporm>e41g7Uw={*KTS+TZQc{v_ z`uyYSs;VYe4;MSg#h2cw8WT)uDK7%hEbMzFyzsRXZ=PqVQ>ms>7;h?V8iiG_Kq`%? zthsy?T=zxY9woai#koN_dYRs=EuC{mR$pC1F;*$7X22B`{$q5FZ5|QhKGCG+eEO5V z^h}7v0eL8(`Pfs$`QsocQnIv5NH{rTs}f@9DCLReOaoWk!w-aOgIadJ>is^Qf4(`b zy?uVZ?jmktX=}kZT3qU+w5x(a(c7wzRzw& zQ>*!J;{}5dm#~Vi*6%e8fnp;S2R*h2ML=9Q&48l{XXi!(sS-*w_c5J z+N8&>T_!ahWv~s>Hvgo{Et^E#Yc6rpEJ+lQMly<{JG_!@Q_j#WR>=(Ybw)Lw1)3H| zm(~eMz$jLiFgQz)m8iqA((2ayfWjHT0x4c$S(XkRrcocF zG{EvBMe}rqtnw13SmWoBhMhm8rubWUPWM8$zAC%2PCO^1A*4jHeUSH@H+4KE6oM(mmlPW)k(}#(oaqXxKPUaDHFPh1Bk|x3Fgd3q$)f4TV zUu<@6m-cAKhlLvcJ-|eYHDAG1a_{)161J!}Jh5&&pMJSd8g&i;nW*6JB76v?;ZE4u zi@4zwCv7C8To=$>kyn9iJQWi}{4J)yzT2-^62=#F+^y~xP@Ju_1TvI=U9=*>4>TK< z{}>o|9548cfF(&53fX-#f9pfws(-~-mc zhankul&W0JSfH+MRO^uIPSX8E?AX{=b&aXhy86m-IjZBEy1TepkPzK_nUhemqdx# zRQm8_AXCij&mTXw_g|FP^&-*DLBF}p#SQmQPu8E&xLpmiHVXtFm&BuQI=#8HIyuRd zrKqkQ75?Ee!b_}{O#d~S#?rfm?Bh*+cqt%nGC#ur_^!jlRTUkNNIl~L{Vd}_7M~;w zr%Me*Ms$G;$#{G2r=Kt&UikItvy&ru^Ig$$C%V<36aTaKBxp2j`A)|3sd@LiK@(cw zG?x(ylPf%Q%a=Ign6RaW6~DF@N#ah)c{{87K~pK-D5jG;I1d`sk;oXv zE9MML#6~b!NEF%^6VW8fd>5)VQ4E++M@2=D^Fj<`p8>5_7E{N7r`ND3HdW%rvSrKs zBD57zJA4`>_gzzcx_Y@E`z=z3>M;#vG<3^-*xYn_BWV^r%&qVRl}Se{7yJORw|CJ@ zRF6C|F_(r;#S;vdijxDm07p# zrj({HDUNuR<(E`TronTXq*Q$@?=`5oTCb&jTRY=Fx=Y7kji2s%WW%p;es`e=Q>ju! 
zhnIDqeB1;0q074iREB}DCCdvB!1NWBHUpeRrJr$&tiG*$(Syv^NT&A!*V@0dprpnB~ss^6eNhx~`d zO9a6lU^=ufyLDxy!OPqt#?`oOx0Z#rcOa$?5H?Pmx&&<{^_x<*$~dCkk6W>5Jfqbv z%Z=tE6F|vhJig>?de-n$->=~dG8>K&rmHvGdvBJ*s0}9uN_Q26+ltuTNnn^!CvJD$ z)O7#v>caG0ij_H4U%w6~pU1?eZ7Lemv_*?;Qin7$+%Z~f#{mSc=1kTu4E{M2 zpNYsAf`OrV*7+1Lu~oK3O7uN0#)`*tx%0!fbe>e-eob5|EWCtF(ks&v&MCX?$)LEP z+{2qswzwVUo--8kY8b`Ye_au3PksZn=M}1Eo12^(CPQwDdyB1E6f+*q+;cCma}kWi zviS1RvAyEEl1y;Mp`*+z%Qk-oruc^BYLAy-$fscr1; z;u3`;R~)ggezGuiIFZ04xMb_Y6WgbpFMMB^UR}4bbZQz2i}h$6N8{D@HE`Y^XS8b6 z2+{TOWs@Jp#X6GZTJ~NDt`$4Cc-;hK0*!i}oW`N!VC@CoTSt4x3DkbN^v|!AhK|nj z36Q;74c=^dzGwTZ_g)ued5e9<=*oi!!{LzTJ1@?@SJg|?Axw*ko#AkHWcqo7jE8ds zgw$&eqo0~Mou_b|>wP1>=bZ)ffAF=^vDFZCu0AbE3_Sx}CaUzsy3il6cAxO{@#z=1 z1yAZVsESP;#|mb;CzImIJ78#N;ZY#;>vPY51Ev24gWv;5$Gx)pTu5yh`#=WgYF@t) zo0#+isr6Gyc&nHEV7@f>rD2U5DI?NxF!b-=%3(6mE{q%-PSkD>Z3Kq8`eh*`64R(C zZFnp%rk=q?Ro-~KYgT9u=~3F-s3ntyQoOnCz22)u&VRP;zq1>2f+=@N3_5lhyS2HxqgTDOy-_e|7VUdjfv6hiK3IU3AFU;8FzZtEU37D_O= z^VObVFR0*zv4#k7CHh_Mu46c41XNc%>1coq_matm@o&B2u$>;l-{DKsrA)76!_OXg zTdP^uY5k8Dvq=2Pm~FfKJJ2BbschZg=~N{mG|8zbugJM3Gy=uV9jb-v*6}9V$QSo0 zbcH$wWy-9KAF=?NkWk*8c@9G`yi&jIMF%Q>m&fmY#gKkW9qY4WC=8Qc`_5Ai%Vk*W zlZlXBH}i1%pPPs;0qz9ya^uIhrvOrCq=lSDN!;E$;%3Sd^u(n33I6C6m@nY>;kYE| z4F^X@SsO0#0H5#M#qUUUfvL5W8r%P6*(gdsT;F{*`Nd+6fDS>M<2;=oNUh(f<#@OV zTrDrW_NpD3IU4t6K9%2-#?2I)Z$lB{7gvkrRe(sppm9nx<$oZerVBuc8Tjt^u>!CR6Nj7O<3gtQg@&nq%Gyb?OGwe{N>@oJX5@; zFZ${y67S8gMDk>U_;fx|T*s%Yv|F{>`KIl1WNmKqpQ~SQEb135{3-ranPXq0S`)sO zh#g*zDZT49Cl3K4WmGE!#gx&T^!>vrXT+unJN^T;2#TMn8ne(R6hdZ}GD9LRDXaRq zMcDK|o3g4M#C-6!uE{DXDTq^awy%qi-aUHb^Rwft%iSCC-2@zV!P&FL z#$&f?-!>pK(*$xQJ@Z-VW$5uSjSXxF66*1Zx)@6AEK<=KlAg*(d^4Yqk7N@M6DwDiGH zG66U149TESenFD|#hN=H#dogc85y|k79DTlJ{Sy0!(t2thBAGT0YNG6PYxh!_ z+4TLJbbT{b7DsWcFb*k8|NM?i0$p!W)*Uk$B9;&j7Z_$1;xJ%<^;gW-srjfI;=JMy zjpS=Vw`Ri*R-Emxrm%75G|Gy0%mOQ{qP#NPE(E8iJW5NVER^LE>7go42?~{LHzZjx z@=S$`iIGPkl0+R8ySgS?cjP+t}j3F6meAs5WJ z)469nd)!2y3rtz7QZ|W_kxwi_?cbGBfJ@mZEkko|e*dhL&1j5omd54^MVNnwu#R=< zJdpEEDjLG=mF6NI+kVxA6(AIU=9%o;tPxr(Llab4svinCJsRIp3W zIdKMgn~ZRP%>JYAm<>LMGR_VN{xy>C3XdZta>+aX z(F@uLvKFt5ISooWp^SGU>z)?viqesNvV0i@(Xk;t5W z2H@~2vfU1`fbJTWHVzt#~PPtYTFw`Q{1p08qreE=q3=qBun z%cMAhvVp`a%-G50Df(AymywgRGXm-N9VfccR{p4=Nh&DF=$D1}vgOkC5XidW3jBSY z^wn&{%#t=L>DJ7D?EPl`KD*t=rIVj>zX@j<9wb|Gq~lEne&BR3OCEg6%YBtvt-lKA zZP>r};}>(+%;{;;nz>QL@a%`&R6}D@k@QW1Rgs zCSJ#X5A;EMteHaDVhN6~V_BJBAp(BVsBPZ@#dl_@>>=C7X?8Vrc{&BvOCL7F_AWg3E?m39b#sE1_fg8O9YC&Z3UgTut^sZBG>R}*rBUHNc=2+;& z-(_UAhfL9)a#=BCyiCCRn29HA-1}eGaC)~V=Kk+y`6fLS#ap4eZ24RNa*p=^@J8}| z?od#4^!Dx;S^RiL;o0x|3#3~E!84BF+n98X|8+m4rdoprb9dCr`#Y{hCVWy~mJ!HA z={!;3H|K(Tb|dK|HLrmE2cQ*LiIxD%nM*&bjMPaP3-7P)tzbvgUAuvPjM)-;UO&#@a|unc>_=L`D**L%eW<2xYfa7 z)R<$amjX9#y02^)bMD`x=7M(+lx44xl-EOd=hXUtHNb|>xTYYA{r|jWCEw-IbmS1r z-b2h7Vt#-J=`z?d>IqM(EA@d0_9SH2y|uFw%ewza-q*>a{_y|Hkbw~!jsMFY8~_v;E5p2?WePHFA6C(<4OUEu8q8%0Z*>9!MKWLvi#Plf?#K!yZ4uTIh zP}_dSYgn;o)4190>#GKck%9pc>5MPh-gC~S@23+jKC#17WCZ^4ni=@4Z<=Z5xqgWB zOn~5dVag&nDmxtjN=*hCYbG~pNyHudR&vEn?tVA5G~>D&diff5nYUye7-kaZaLmelY$e{HoLvo6l>S@ z{fJ3%MxewF4@#w)IQC6uYzQYU!~gh8d&L_gk5Ot-`bZnJ>ht6VT?;4%LkwT@u8=s_ zUUcZwIDTfqqvRo{t2@=&T{1}eI?1H0E7*9zkMlL!jrn0v$g}PKF{z|Geu8!F*5R~l zu}S6BejLKWFhk6F@CiiQB)-Xv*}vxan&3n~gBTfr#%Y+4ukN!f8|~_Py<7UT#dY3l z@^JCb9l#zfYz{BdEj(2FHB}d{Q8Zm?!7P~(4^t=J}3n`hTeS~~bx&MvucC;blZ@@eY!FQvCcNe+28s&qrNsS1N2_#x>|^Sj z-zq&t)Htx85FmdYOXo+2=4 zjHPG@LI{C|govqWbdw)?qHk^ENv;v+Mu(9{?FFMk=q0^&NaIEvfTpEB_TF*lfzohT zux7@_#_}tA7RsmQ?eKQ#qv|{7;IhpdH@?WXD$y7Ix?Y|J;X<$sO9wn45r);c^i>pZ zSyQ{yjQ#?+aAMIyEH@dxQ@he%7BaA9R7iTv8_bDUf8st~CYwb~4DilaSdkW&2& 
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/sdk/ai/azure-ai-projects/samples/agents/assistant-yH17pFqN8YkDalDYSHearWdv_image_file.png b/sdk/ai/azure-ai-projects/samples/agents/assistant-yH17pFqN8YkDalDYSHearWdv_image_file.png
new file mode 100644
index 0000000000000000000000000000000000000000..70c85e90bad3f6071ca9dcacd9ac3cb5ead1927d
GIT binary patch
literal 155048

[155048 bytes of base85-encoded PNG image data omitted]
zzNxoz5Sf&__b_B9g8t8wNp;6=Bs8mWxJ>f5b$=#Fa9mm2xsSs0hK?{X8dIo`Q(SnGHQS%|D|N$hTn0NLT_E zjR2CYGgIf40EvxW&$qn*9o!o=_8SxGPW`AP_XMj&-uzh5R>+%(X2Kec&)tnvhO923 zm#&Cf?y_`gJ0$J>+*qCl+4@e6CQ)+}e_zJ3-VMqOYp4Hv-PJB%kuzJiqI;2aZ%ViPGZ zc#msTW0Jke5VO2Hh=$k@(K}3Ok&Y6bVl)PXX38Vopm7`2`h{{FOKR2?Nh0O#$Jg+?x}R_-BDy#M+VxH=>$WJ}{BxYNYgtN_5ggeVu8 z6F-C%eytU|G&^-s$y+rK#t=j?{7zv%*5`5Clg> z&&j@0I6*;2M^dEGKP`Ly^=bc2c=Kzq#dCJDhF@2JghH(#G>ffM!16KC7XHvLdX^D~hZ1a&esH&aPug;fjph<3cz_+M){*xS#WT@C)n z?e~9AW2fTygrEH|Aes2NY$PLN|D9baIuqz=*xmkTART^20xmzh9SFgYFR*XNPZe4J zk|Y6ip=ZSLU5NLi06hGj_}BkX$@=*&nlx;GDXbj2%R%|W8l0a?5r4Kp&~*RpsgUvJSx4)eHYCmE}CQ=lk2N`RSEv zQ-@(%WNK8aBt+@mdL_BPCDU~+SBK0_`Vpu&pZxg!6dRzk(-=A)M~jGvKsGD*LkZc% zw1gqeZNQ9K^KM2&xItInWv!;@f@6*OhuJCZ|ECH&!DFLHS4lza_su_1UjJI088!>Y zLF^IRo6ALu#thvPfplho2%NrQu5C;l~N%bc83&u)mVj#vK6;&yiX8kJS0+DS)~v*MofXZ8BU z7-&Y88E8H5^g4V^^P0Z*wZt3yL*IL!ZE!jJbn(-5X&b(p8E?J+`_xr3c^OuA{kR^Qo+UWb$Hi`Y6fQfbGthil9_DPdeO)=A< zkUxkuA33Dv3C$OOWN(B}JZ?<92md3dL&)Bsg8N|^Z$Qh)yz}$ z%bzdN5Kpw72#bsF0(2x|o_*J_(0v6kk5zSv*hA^k<0k#NH&a@vr}iKNb1n-*hc9*G zIY%iS{CC7_Dko7harfvFdV?uR1Qozhra;6#v71p?Bm3O3kVaQaurUenxnt zxXwII7o%v5xoV!5F9(B$Mp3u}P>ct|&cT6-jY7h4fK2AZZyC)yA9xAs8y^O_nNtsd z>6YQEs?roADN^BGw3PuFTp3XR;R9uj2xXM+0sq;T@(_L-gerh2Er7)!_q1tM!M8^B zO%M52s*TLH77>9&O0W!sRyd|L}3yI?&SloTmhukTf$yQxUqA z@(>sjJk`=qdW}>~E^$HB^O2|n;#v$g3^ZlQmIfWsK*(`XqZNIN<<2W1bz!e39-b|~ zT2EEK>+0&-16RR~*_-kO#bNT@7BEPiXsc{O6R$e32rB?=>_#Qhwt(q;VRX-1qbxHw zw*=(FY7`{|7+gCXs{}}lRA>Yjgc_5~209t6w!@fIF5jxJuipq0v*i_)ka%TTkvZ_| zPkq843PTM>a#yjq0xqK%r!&6fu8UiK-VQ)gx%?YzvO)dH_`C3N4? zxC`o6C1TRc*EbRi@Xn6<;xuPxy<|M0#FXo0RI1=QApfbbZ2?`n^=SQ|pgg1#(qF57 zBPVpYBc-CIzX6fkArM&9UL`4CH!o|+5*6#@a@y1JZ(C}|hy6SP%mwB(2gT+K+lSEg z0SvkSx^3pnnf4$-QBIDpCY623rED!shiDGfl86Q2%BS zDL%zUY6b>j0I`V!pe{w~X*Rd8$htd-ezaR4!O}Yb4kX{z!TALjg{92)FJIJAxV!No zCUgc4_H84JQ<4;7>nMQF%FN7UgNdN2QZZ7V>~VoJ_D;M-@VaV(ae3$Caq}85x>tGs z{u_K#&M;o0iw*6bV(B!2Bh+c`di$cQ{WoBh06&&W&SJ~RGW4QS(GCht03rz5bU+QD z)1u+oY=>2Xh7^3**(Me&@3V|TP8`S1_fa((LJPuToOv3IhZzCf=yW=Xn{x6|^G%383L5P9(W0uyS(eK-of(8UtihM}&Ff_AYHPQ%Xf~p7Y~qdls}N`5F>WX5 zTo*=UzOiWo_9VbSE7U814VIX|ML;R0z>qnCPEtMrsm<`n@L^CxCU9On9q(xMs1=8w z{JrlfO|WH>;aoU@b~Je0EREA|IyFqgPcOVkD^m`H8Z}4@Kop0ac7=gx%6%>7hB$6X zF;Gh{Z*PN?5)8b9JFOKxLcnySa3wVr&?^xi3*1=xa%ta4XWQopKjS1O#2qU{u5J74 zFYYqS&y|A%rH90+tC$@0tvCLK)Xix>HbW8@VC#`wdHW`F2c5t+qI zt2UAwtUT|d!otEX%yeo3p610M*E{NXl}nA#^~609f+`mUBy1;&@YrubcN72YC|B-T zMb#@Cjg4bL>gkK|K{RCqCuW*#;-6mDVx0qp5k?>QJgv@$9n5X0C>m}>#R|23#kRgY z+$4_KLaK;O=?>UQtPn7)q&rVrgk4j1z$?O-Jz z-K0eYS#pitfcq#;YT-!CcR(0C4CL_itSJlv<3|J{OgrA#gK)N#y^bp-#T9HWl^QNO zI6FIILF9;Eg8Gd@qQ12H6i8<0ZI`B$c);yWSC$5NKMZqOmP8H7w%%SG{5$0cjYZaZR zm!hBwZ4(z1jLh#P+oHCwp+36GfaK-gRpO*uZSi3T)C9X|93>>Ma^+!SkdD!~PB*7Y zn4X}6b+t%AYgzm@0OSh37ioeTIXkgJpc7%z79Brc!2BZ)Q9Y@F(zF4!+QbuqmC*Ut zwzlqh-?r_-tWbn*0YO0~HCG~{FED9+@1VAM^JX0_*I+>35KP3C9U&NsI&!g}@4vVM z-gM>MsaO(@{`>KJM6;z|{6kY(2e86Qf&7#!U0VPvDm=?&X4wEWC#KTAVzP1! 
zR4gqmgS-3TJpf9d$tpHlJ&VbOH$flhWd@Sr*>dLG+`ymEvdg3%yN}im!7Bj4;zwAd zbmHV+cPWx}B{V1~iXYne-+6e{YVCA0>s38rPmZYzmn_T+CF<64@EGq=l` zK<(^uz1e{i9#@-QH6Ml1*!OQ?4%<0BuH3nx*Uqh+7X`f;>KZT41k0yjn9tAgOA9%v z+F8H_JOhku8iPqJ7H+fb`iEc=AqikG$$}qY;#^H{XQN7!bZCm^RHTP{r9@xvQp_hW zKO8Msys7&k;}mHbR0dDg)ajhd72s?(b_(CF0RDcxI@QQqXaaViOdG2RR$5(_Gb6@; zX=t0*=Hm9a6>XBpg7H`KYk4^IpSLouqGeXR-)0*}T}D>5skoF5N}7no;k0x+i`@O7 z$JhBRP0kerK`X;#kj&WID##WA=h$`@$lRLtkE75}zeeN>Z2|I0EbJxo7o;@&Od89A zl|HdWZzPLtL~BY3mYVwe6;LHaAlhtaB|*PeyQK@Lcg+oRoEh%=i*nb(g@W|_tPuFB zgubX74~v;#sLEtnBFP_D0%ezzmd>57NuDNyJ$cRyriPA{7iLC%`6Dm5>H+yHvN|E1 z;x+n0!=zsAYRg$$8G#)o($WolKM51m8BFj*4{$7MMtlIZOTJ;O5QJS7Ae%OZmHdKV zb-X(t_$)?)lQ<3jA&!FoqUhH-l&HhZ5=_ut<-T5F%r5n z5d=q!at3#bZNT;GPD&Zc^#Kl@m@F(LDY+YxMwmJ~2K&ySUSqL=KDTa`f0!1U>TP|c z({6aD2sFfHiCEa}K`?%WPaI8r21I*^?y?hQ4R3 zs;b(TMsBaDsNji>tdim6pR07>z|z9*>KnCd<1a7LUi!l_st)9@%Qi|kkbbQUfTvoW zd@Jvf?E+E${{C(iXM}m(axBaAeb`QQo*K@N^|={>!oqU+cq;?CTPxFE^fQ($*<-mZ zbhRQPt4{s^I|fTTk3B<|4Jw}1UUC)6x3`=vy~<$V4WzzwciR~HA?WyMNDQAed)FCI z@i>>7fr_~EG1D}>d!nuj$t%b)R$ zLOAiR3?ydf%eOuDT_oa_mseR?86mM`$u=lkycqC)oMrd9y}bbDyi)2nHMvCAo57zz zg72bof&^4)cHYB)1V`8nDr#(ZAp10XtjQuDNg|33cOc}zodu#TWOV}ry(S4I4NQ39 z5HaUo-*aIs4ivI_#lh(>8re7>b1iNAkkad{w0Xs=y?-B=G*$?hlv!P<0CbUu-CH6n z{OYtwJhWLBb`JIZS0s^CLZQFZ$`1;HmBnL+A5g$hLNy*aDzb9P7`s7?Fz23C52X^g zwVCiF>$6BGvWXOUt@t-)J~Iojd*&yXi!hA}!!E=oHQm~uFcowF5 zIE<8HYsYcV?gw6BRrTFa6tSmi%}bNokai+|&JKh^1VwKKY?@FS#lO4gumVyb{cLSh zG}xem^Ge$JSbrgP?LhayEeQh?FIdv_KIc6z^M}zMnjddRksO^ri?%K29KuTggN!vN zyabVa)Thm~$V|!27(XOOT}Tj#>8Dh{*(Vi6+;ihnOs$4TsUpI59R}ItgV(AOlatwG zovcH2_FTu$o7R7=I{6{slpoN7C^-A3cc2f-5TBdH0KX0fIJwBF6U_0|Webs3lSL8w z1cI`&vlnJyhRLCx_B!&OgP)si+Y%*r@;J*ftX&I&3%RZEh7D=vlAoSOfE=WKoM$UlSe`b&rbWKet+%4@K$qfq;y~YmBU0uk(wzJ5Eix|sNZHo(zjsx8a&D5%6 zt4$NbQGCvV`6O|2VCM1Ec7v0n;ga}RRN7FL&k{2%V9;^OYv(N%(qbJ1gyKdpm7&!fgJRFDr+E+t{oHmaav02H?{)|QSou+!h5 zOFv(po|kv~9UNUJ&;C$k2_jz(MpZYoFWI603&%oUU0Z!=2UHG+s4ETzHgR!ru!8+2 z^OpO_IuCMoBYibFrCi__Mh9Ap*wd2<2?^910^a;=*hzCmQNgWBO5rs9T*2oA7rVv# z#qT1se!9s7)*@>noeQ88OJwI{A)in13V;rpYQ>w92I_<$5Ar*`i)5xB@DBlJ$m+dN zX@Com6{S@5F3kV;>fz%8G z9wh3SIXPh+0W&t=yAmzSk8`#}K;ZZ>X3k zet6eb%aC73+jKVTeEi9_o*d@_Et z<(#{_=QEv^iMDKDl|bN`FJRWdUV4d_62d3q)gNsrMW>}_1?NU=Si3|CX;m>eJO5w#m0^kIv^FK6APlhLsgHC2Z)Rt8+Fw zm(X&*oW;!A@w^w<@HXMpze%tmav5)q?xD|ayaF;F05Q3R`c?BXS+l|$8W;nce-n`f z-pv>|$s;oNN9(tyg`(p3uZ_?D`ZaL>53qX(Xu>_S%Zh!Eh`xbP-iwolvGIRCLZA?) 
zwn=Pm>Ju+4tD}l4W8zdHXoTx3P(_eVn&BBe7{h`R30$EdImNcJ_(ho=tvwp2;80q&9aa#=Za zQ%An{oUn7`q0t`&?HJ-CM*dp=J^OIMU!;^d9e%MtvaxZc;$s}`3O+8fCw~6SHijOY zI9_@C_VsLVoWXeZQ*eBj?nv@T?>TZgLhI#jsR0vWgWi^wViG3=f5?Umu`jCj$9xSD zCfN5a$KYoNB|C2z`DFi2-TY()xRfnERbpX3cjnCIVA-MHcFDXjOr+>SVnKlXcS7eM zgv-C`OAe0ii4!MY0V<0_5iKB6I>US1h3myz0yV#Xxtu>Y^6EeeP~Ou6#?u{2fByM& zr2|CRznSS*ZrlylXw3J>@((s=`G0Huv&o+Cm!!D(``he>`PHvJ@pp;|4C}8h=kK5F z0LYzb_7W=is-Jz5e>|vvUh02uy{7=Eep<+;gG|$qSZl_plVlWK=7n{h!jz1|bFRab zKjOvDF6UpAACS0fagv*qZ-vXPcz!A$luh-#bAyVUKYnuTT9_lK0MtPH%a`m={Lwnu zEPRsbieIjE$VhJaaM#dAq!dj^C=&$3A3vuSzCXqP#9jV}`}(hwkRx#q1bIV+Aof0VX{~)!7#cL%?czmtg^@So1lOe>{o=l%Ej0Yzii`hyi(-hW zL^xWD$gi*Qk^rSD$mpbWMhi{&5hC!beJ=mNFHZ7tA^%kaU?B=V!*E^!2poL0Uu=(`}mYi@+Yyi8FxP<5&sNOv7>^(02SAf125jJP?i{O85J> z|LHEByTr~^Kol6<*>ylczK!k*U^WT|KV&3_7F19H2<^+uRy_Wna@%Cb=B*kv%x%Y@ zH2LXzp~KCk1AS7norORwy_s2$L{u+R8jVj!KU%<(H0)y`53$r{V5zI_*LXjN*@3$j zTJPQXRzW?fM{M>E31jHd?&RV3ed82t$5w{+fJN$CS;!8=NLc)GKK%5dP@vqn50g*w zAp-T+^#eXK&~@7xnLo5rh9%LO^0y(gR)>~*Rp?e#UFxpKq!~BIyti+6BOfIvN0P0R zEk893A{!&eT01+t2M^w43IY_Njv5+xOYjNMCuPjNBI9wAx?>I>-h_r#l9ejEl0Fb5 z^Czah0K^w!Qmq7Hp}OqE@FNfU12k~Nd|c=U^TJJFbl|O`l*>~cD6n>|2FN%_a#;)_ z|ELR;(Fi221gryfe7yifb5c$H2zB^jM6@M%G?`ZYg9o#W6NG_~lglVtoEc0Y{)N!C zSASAI?i>x2ld5u75z2G$|JhG@hL3)idaM-HX~4U#7z9;({?sA_YAf{x{-9JO6sg^9IRq7)gaerlcQ+fRsoh zg5w17vA|=JivFsiej)D6xuA<`9Qh zr!{Y&48e#j*U8a=Bog?R!6d#IJTcGg+0E5l!WSW!hk?Hmiwd{HKrEiG;oEMvYQ7Y- zsu)?hY%+xKCouja^jj%=BSU}-&ZU zLKn$NbERH4zV8OYN?PPR9@WQ3UjO~rRtgM1X`}0o!jHenJYqbDkPi~%&~M2D$vgtG z$cO+j((W4dqu_d`j>m;-FBAZnP!!0h%d!3p4-g*7HO_0sz=oSFF;R;7WM!0rcBg&@ zl>O|IZ~JydATO{EiSh852G&vumzoB2#jSiB_BYi#?JFjd;vqJrT{gUw%4y6Wr3#j) zcCgSw*z2OAqDXg1XHOG1fVGi(NTOqOXQw53T5st4e3Irw;9HoNq4rlN-)q473z_6u zf@P+3#Hk+6iUrsYh%JjrY>j4rvgRcl5*_?Si5 zYO?FYgwuGxQ#XinTJNKIV&Q#TNPbJkK}?T>CH)x~hmB20sr7Stl^&>3NFZcgLwAr^T1dJQ%g4NoJ^ z8oV+jCKR4Q(Q9Bm$EJSKqV{R*S06%hS6&Bl{b5}tV(s}p-(`XdbzM5cC7{a^ihdl> z{D@tP5yL^D!`lOmP}v zkSI8{UVrnR!2$qNZjO~k_cD#{H37{o#y$zBMn3P|#)4L9)DVT{xEpBakw~2j zH#{7xG>wJka{wXuWF7-7nWo2f0T~xwvLvH%*z(g5&@#Em6P!rqDk80t7^F_4BwpFF zXn5kAM6;V~KyZPkZomi+n$(lkyG)e)>t9)VK)r%nM3&>r%#YYP8snI`wtU|SpjZ3= zS~_*HV@dykrG)sWM(#4_bDRtT=yjpH^)UX3#Xy4u=0wTBk6HiCp2rhwdUzLPJ*CbQ z5ZwY8^w`)1WSJZY$mbjGCNYwq5}y{;%mv|0U3EZkYCG*Q?T4JiaFNuuZI4bV8bIUo z#TUpa^**90)XKge1QI=V`ZzRR3W^in&fd?;)P73M3AlLq+w-Js70I6G1m{F*Je@sR zCH6&54n}YnlC(xsI`0$zP__IK z$TW55xQ(1uFz$u{*I}T97<~H*9FbefB3txur*A1iVA0NB0pDSMChoBcu-2KI7UfS zMWF<7m1f$KUB?as9n(DantpU&vt#iG{v_fe@To6RUaw(t0Mf`up?IkG{f- zivU?Cc^T3d^{rc75i|@OO^Dgjb?Q)}nm8pu)yuKa&!~H#a1$0MNKF#` zp`Fydl+Ft_N6 zIoo!ribQ@`^~Ip$bU*a-=;H@z;Ul7O^YKNa@}wq6fc2#a5uVqtFN0hO?`wp(7gnwo zfaQR$ygm4PniWPF6p59ni4RRoW^G-7lUe1)rTCfFf@AQ&xe5F`Gb>AuG;nw)61E|_ zVbMZ}4oCdIX(k%Mr=A^V2|%k!j|4TIw`XZk!TqDVVKKHHhZO0dQCv`84@q1?*-{z& zBz11KX(7tNdR7e+q4CP{l=6X+!j;ERDuy)69&;|_%DeH06e_SWHOc3nrpcT*aUj0T z|5!an8I#}8>RcLhgT}2AcmTnDdk>Ac!$u%w78aCf?IPi|_#6dTZ1aJvQ8J`lQD1@( zA*0s}l}f|C_OBw<>yU&5`tKmX=~Lh0WkJzDDWYAt_9K-b^gblz!-@CTri7%pMWg#T zEvUZ|P$f-9F2Xf0Z1v82`SKOI>1gf}eL{j*0sE>o+&BPhFZZxhtY_)z7(S<)-U0q< z#!so)oc217IPY)cYa;#1)J#tkF6&Y1M1x(4-!cAs!j#v91AaOs8 z$GY|DamX@sG2b1BYjl41RV*4ZuS1RUu*yOX@+@pzlWdQCboXND+UFZSf8kQjwAnEj zj0QTH$p3&kLr?g43*=p07C;eH-F~_WjP&fu<6sa&6YlZs zZFr0ZXpmEY!zT%3Ce7GX%0xB5+5eC3^-#;;Mm3{t9-6(%L5XEfSG>IRvk$c4xy`OG zjz6eC>?0ICr@tIbc#Bc?A7tWFZDpW~q$WEw8_(W3j#w-?=T@&$n_(!JBS5`xVFrhY zAmR2PJ{Wa6XQc;Y^N_>-nZ?=JR4fNjiTCax_z)EoQ^MBv1H`z%Pwd({t6PvLZtK}O zjkgIV6r`x*nKw^Vuyu47&K|A{hcSu?r3d=t^7oe>xQz@_*S?XT@*q|S(Q#NL!sZ{T zR-&HC%;aQC^mek9X-Lw(y>}C>1%aQP&1yZn17iW)XRdOu$GKWW&Q{?Q>y<0Eq~aq+ 
zB7I~HlrC4i=I%ZTF-IO8s)dob2LqY;PJi)NufrkGgM3k=NxD3^>RT)4L9Z7Ya|7Q} zHVngirZP?&A~xu@3dET`gJKkWu@qx+L>f|X92H}h%gyQ_oEGT8eZB=0t1_elmeB03 zn{)WdsR>l*yKs^IsxkU5+SRvG%N~z5|hS@{R-kZI18k|rIu=8{eWur6qLI$bu&SfpXJ2cZ( zpiBNM@+JXD2(Su55XkPozC%}w1jji0^E9ZokwrGca&uYRpEQ8Y(eeC%RZCqRSq#qP zecUlvTF51@7l@+*$7gtHW{rg(1kY_>aTchdmw$(Zk0a;7(H`yBR50dicf$=MO)6aI z@j}9K3pv5;+mPuS;n9+h&qKrj2q@3DLK92#@gkLi#Yn6M{736!FE)(|&QR=(+YnI_ zAr4wmw3o^A8AelHUo8f3HvPHE;n}lihk(g!S;NUg<4&89EQoyRr%=zqT*~gBcj8 z!6cmmCT*^zOTcK(gXbOqS1v?Dtm1oVesZ)_)~z|GxnY~nt@dvsIWXYd^S;uJ zxoOA0a%Eh8FT}@}HH<4-7vO+|vR|-COh3l2RJ`8DS4hsN+CDz&t5C3YMYIg)C$8-q zb{}#tPjEdl%C1XGIi@{php^b<;hN~pl9hVjse9l+ z#UFAwVXolx0D^sIM_!LmLAPaOpvtjT`j6iTf0Iy4Z;f2&uktX{Qr@eLq==q_4gjui z70{n`u2(x{A=K2Gc>WmuZBkKQBdQ4bEH8Ce3zkr2x9ab?jeV6&uWJ9?42cJ={-FCc zYVQcOOQ^ZFMn=qjsdxuuqw0Ku+>i{=b3$+W&BpoAE+BtpF?5`|ibHk91ac4?*q*>R znJ_w1&@w@Pp!{$6aw-}d8{dha#tjP}K}-_Az@X7KTPk|AJ(2BF+6rb^#nCYZs^JT6 z1_gs6y3CccX65>9$U4plPrl_=czaKpvfmV%L@4zTlC|^K1sCoC2TNY2uxFIZOm@*# zF5I+z`}Rm)FB8lksWBT^_a(r3fb$uNEW>__-;h8~LcrG2J78s7wLeqz0L~p!2e)0i zixYJ>j4$>D`_2?8p=bk(p{8rs279(jp>01ID;4Q||bY&=v8_ZkUa37zYJe^T=TqyC-R7?Q9* zoH&fI@L#+Q<5U{b2aVF-&&--jZ?B!}b3e}!snAM;%MkKAoJ;e8k&^fRgUbX+8<0NO z0|TB=*i_8*F^W8!*_=E~3%JOQqH3go+!K&#ldCPEeb}Hhq#m^wc>sl^lugSdbn6?e zE-hh)3NIga7gh!YG95ui8 zVS||1-J*VK?*Y%vFV8;!h-22Jw|5O|l$~(qt`G^}efLlRXTMHlZxoWTHgSj(LuX~~ ze!O_vOS=!hz3j_d1~SOjF=urtTk@c4!X(ku`Y*5b(pihCTt=E=w<%{0?-M|{&{?Vh zo&*LL1vC-@OdYzPFac>Tvb*NoJ%GBNjyqn-MOzsaKh7HL_9m+GxG71zF1r>yvSRU+^U3Xp_IKHS3Gu|w=w2BxhU!MskS*!*+V zrW!ND!jF{@?1;3$d#@tBasp{7kW3aqRDd89`Am~T=mxUo9Cf5x&bBbaEj%>$0W-;cf%6oE1?zqioZV$KOyqm0>TQhi@QC~+scPl`=7VIgPm`) z>9OSH^D0WYf_wD81+?pA)_Yd|sq^_Bw*MCV%)t@~+z~D1Hs=hrXBM~ID+@!9$=6pZ z?Vg$2=;*KVDq9*N=VMtKN_mkC`c^!=Z=IZE>v>Cg<+|PgMF|2^mJj3ao>#8iM2`z4 zx9J63SOEG)>rBP@Ai>g(wCUpIU$!hKG!4T=7gC8+(}R)PMaYc)NVrpWgeD%{S;gzA zTG+$2hVN@{ucXedW%3(0x{#Pn%#u5F@5#-uNYuI2nkhVJvoHAW*7F^rCA^{d>-3tB zACLGL37ByD&4WWgli0n00|WKmY4S-S%7Y86f~mo&#)6!8%Ga%ThUSVC&+lFZqLB@m zSQ8X0ML_@DU9XZw0mK#LIVbyW4@w}}^#+gAPe(^byG0B*1Ig5xrSbWM!Q;R(x3Yte z&4B-&HYiW@mV{!?>5ESehJEYLop&4DKKjR$W}SuK zVi)s5bgAeGeM8O$kFCd7+rOa^Or(&r6sbUFWOWeU_u2|QSK35W-XkAPK%#eH1E=U8 zIMuzA1_5A^p#q8-56@EZHk3c!N#!^w_lohsY0wDRE+T6H0q~er*@UEK>rPG|d4XGD z$XjP#lJm<<3nHED*JF>zo+(J5u{!pyC>hK zK}&uJ1~Zh$z{BT=Hix;sQ^$?$Ypb(t`_M3Blznvt|HU*9H*4}4<$d8Pu{ifUMsRr)ChemPkvsO z>X6VT?tDJ|ME1ZtE@>M~{w1Lln2zyit0AnDav2y~H;O$ywd5VjA~s%t$&)zsOy4=# z+!faD_f7T)yb$-X(5pY`iI;)!DpZwEZM91})&q~L0F70Iq%%|9pGh;`VI)bxl2{jD zFJUO9Nly-$3#&c19~~?u0HW@}=xd@8U$UxahRwN&G?6FhFNer1Ar5}hPN{zDWNkG3 zVLcB8v{2BA98LhSmpyF`8lt~Y#C8wRX&S%GUl${+^=8r-2<$@TW3J~_n>Iv-2NWQt zGU|5T9E5*SB+9)|<7B}8dQD$}#e2U-EdcWrfwuk=g-+q&;k;UUu>ycXc*QQ*9yxL( zvez@@>eZ_Y6K*esjluQYJBatE(@{e6`0uR7N)#z=I*CRDHeRFP{UmB{TK*MMR1 zg&k1nOjT1}se$PP;qq9fy=CI?jgjkJDjtiAIDKsjSMH4JCL5#lBcoyyKgMObXXr~* z-q6N2Nw4wr^prFB!T~ML!bXN1TeBXP>HNEX)$=0;Z7Rxg9Q%9c}C+>)JPwn)YJLtVacUr8B z9+nr_OZN=WM63Jv>)+e&T*U;$G(2vr)TZK;+Cqsfq|&EDCOut z+3cD6$lUtrP7lM2#KjKDqTCUd9B<_45YvaE8X{3fn=chxWe{RW?ni_0E|El(2JtC|8-M1>82C`wvRzwGus&&`ui(e>Vdhe{Pfx#^T8}F zE$`^TbT$&8p;r>Y6b+apzbYUNmSpn0542;1E8&)V18ordGeGr4iRhe70wUy|0U+t< zIh)G^urPuhyJ2K+3JB8+_mXO0YXt0QhUA)6t5lE$iv_@kQ>j$f;b>$=gn;)_%^Qf& zv-6N|-UhtSEpcB;Isxu!MYd8f_EA2VCg9e7MCvNAkUQyDh0z{M2z!kkI10E3uOs-w z-Q9hErE|-@Quhy6psXFDKLXK|#ypp0i1CNJ@H&@0OFfl7+icu~^LNc$pH2(s_F5)c z8HJYU&oD7D39sX1dz497guQMfRL;=ua>iVT9S{azX3+3>KxEbr!k|d^$?#^N@Ib^6 zxv)&!;#^dGohQxG{qs_c`r*i$tbb;~Nav2iORM1P73WcnGQ^Q4R(bpRMd2f=qB+8P z))jn%X$H-{2LqI_R^2DYZaD|+nS2ekwNr3wVbuEqhdY7_d>MV%oH%kAb>z^6S5aPL z5UK@%odi@iNdxV%E_dw6Ntd9GU@$d$!DeJp2JF%6$`zF+%Ps_O%cT4P^eQ!>K2SF> 
z2rEp)nbQpSXzCng)Ws|sL_cUk(KS&k!1>_p3&A3WhKtZbHL-3z zitKzmGD~>jiR*A2L&7B5^tFz_g z54@O0Q)5q8>;bO|=kvW%^FYi(kAH1pH-SnuZG_;EpA_S1RG8S$=J|tn%E-PzL%hc0#L= zKGI2P$`;u^>L6<>bkhp~x&&+xPGA=ii|F4YOMCbvLnncL&HYtqObNvX;(l-&R`bH( z2bC*UPQwa)h197^c}|{mZ?su=1g7kO8M`1^1zWXPK%Y_A$mETNCUA1BLJIW~BWO)h z+FT2_e7s?6XGf!$X^;W&q=-dmy8jDNITdX50zs3cZ$Mz>SY#IMghuA{2QzGb$H48& zG-UV~Kn`XBv5v&nQDlC;e?^$LsI5;oD)8%{VjrrO9l0-4xd$PYhKiu6hv2BwB28=I zT!}2|Vd1O~TUPX1bhNd-!kH0`2v5nCI(}Z)wwf~)h6>auL*BW7lBj|P38)CjotQ)K zOIs4&nY#hs3rG8hB9g291iX4JnQFq(`$Z?GpQBa1L065b52zdjHL+XsH` zl8lY|hR*!W)FLF(lvvjYjJFi$NC;*ts3B|BKyh4z=3a6w#Qp)hm;M@`R0BwpuvH+K zNx;CASpR?Qy?I>DdE5WrFvi$2W6GLkOi`l75@lZoQ;8(9RJIY7rBW(n9b6*IDY6SK z5;bHg$*z(Jm9=b1DxwV)^?M#?g_(QY_x=6j_ut*)x-K)9Iy=wL=lyxVkK?r+;?tIH z23&e1j}o26PP2f3Jpe0|-QE<(F> zxm>l|1n?{wP1A?OPY|Y?)G{gP(B+v1+TSRNp$wM!k#aW=^z) z!@Pr4_T$dcu5%kaKB&|xeQVk1E8L|@3L_VUcVs2)!;`uh7FpVBw)xnm?9Y7)6T6Do zrYO~FqI$Ics*96*U%zri$N6Omncvy20~{%%h4O^W0bp0rk9;2O+au#5?5ia`vSy7M-f6*bl9GlHQ;?u5@VML|E@xxUldiO8 zmF}7dFC9}eGl`?LY5suuB@J5lwZ?&@?c9Cs!p!_A;q1Vq1c*Hjg$!!heKG2sw$u=V zhB#9$Qf#!=JDBf}9|DHaz)qiTRqPb11e(9>&WX>0*-0eb$mfMlt5Ch4;xunT%j8x@n=EpNQrp53Nbc{-K*J zC*77W^Yil)yA4?(&zEia<<~*t#)0(^Y#pC2CX7@m)!lN(OdSRZDKnMC;OLe;@Hjt- zm}##h2f#L>;u^(j$6zX?W8-Ku>U}e#_CQILDc5Fn5Hg&_vZm!Dcub7^h5HuSB_Rsk z+1%D-Zh6O+KeZcng9le2A;=(FlKa^oIR|Z!M`Yz8089#xFN7^(Nd zeXnm1(^&rj%LVoY(KDu_NoJ2H7ijV`i8{b{wj6u(yyT$QXZdL*1k8K9BQuv7P{F}8 zjD}+*!V^>m!o?$c2mw7iMF8wDqp)o<0*lsaoVX%cg+=_Vu7Sbvl)y1yn!ja(crSG6 zV&D7RV4X-`wXbNm^@pB+p!h~*B`c-*wc1p-hv9)AKk;7TU2P8!I34mK&ZkAz6K@qY ztx7*Nc--LYjV-p(Z#lRxv$=iBr;m-d4sL`(*E$uyBa~MJ(2C6en&x9f_Q~>Z&h<-y zeVg9-K#Df0XOD08yV2w^FTd1w3CQlz3{y&V(CoAq6#ghGpPIV?H`9hKZb z!4fp31r;q^FVkw{t%^#AZ~7)qnfZF)V5bo9h@Zdiq%0)>U8T&cJ(*TTkblEv33U zJ~wOlWd^;W!~z8u2xpp~O%V4lTW0!E`tFA;^*Cj%qB3b2m&)N<-nXI;2m_kht@q95 zZ~xUe1cRWlTTjGUOUlrVTyZ$wL0kKG+SA>7!D(h8znRtko5)>pscojbyAFL&3!Y81 zlwWY&E&@52_*iTF>(+Et83uQt=>J6RmD>IP;#T@UQTuTy!}A1wf%4H%T!eXu`#^ED&Ve`gP=`gNB5D?KmOzrEUju-W$i zj86Z*xiwVW@9vs(XS>*Q_NNW|{%}@y6$19AVWlNeyHX?Fn_|$A%&Py<&;fHj zYM%N;vrpm@khGS6-y19ah#g{>C;(Nhl&0f>Bh)?f zzmuj{FDW+d|3`=8KLZ`<9-h2${ZU>pP5;wDEa9!LMwzn{8bseU;LzbjXCbEW>S1L z@4GOPpY*X4;{$TZzb?I6*|4=9k~ajcGFq=?;1}lCB^Hr~v(EwIaGYBq&qSC)u+IVd z2Bl~j!^7yOCy>tgo^-bjL6+h&^s`|DTdLaZ3APf@fgoZNO^6ha;wsmhLkFcKQG3oW zI#3QW<6EMRg(|6c|Lv;Qi_E7?d1!e6>C3M%-Q_b@eK|czyX|@d zx)6G~@i63KnXga?icHN|!N*2!Frigu)yXA4vZUTKt_=iBD7=esmkdUbfhtqqE_?cc zo1Bv#eUhb@iPT6`6SNO*qb^tfGXC_}?3>s7E-M+DXnwY&1A);95$Q(22@hYF*wf_I ziQ=2yHM^*Ew3SUDVz9^vDo7QT&3|5=vayfRW!WYp#&-OA1$kl{@opd<JI(8%p zXm7{SB=1RpaDG` z*pujmMfXf+X-W1DS`!%`BXbvs7qtNI5YFKsH)FS6lWADRCIj6J6XzJnzD#;VtIhK; z2!@Kdw3}u<(gRRK3!){?Th}WG{CVzz&}y10#MVab?ity(>ZTshNPFn2RPQzKYPVLv z3mHnm6c8fE(D-b`d!WYvZZaKYx{XAG{kB>~rJ?bN5qA)p9HjlB)0R>-_o8J6OvrtS)E|jJ`ut&4w_T+}b8-H+ z@`Fo&he@1aNIs4X)d>K!P>c5A5ffsUR&t_{*|6Z5VJ^-;$`u2dXr9EQevtf;CcPd$ zdNi@m{37%9t@`jW!ig(!_pMgus#r^XbS^`vRwDW`N@Po!a`AkLM7nSaCfJ`^WDTl+ zKLB5Xa&kHPrL2MzJPrEcN2}jy|7M{q1I*xu#~WGIR&nwJ z=DNlVU5|&Th>Nq6vzEOZ6*J$B!A~bHb(UEks4%OC$`ESE)*w>(5E;Bij?hRar#H1) z_F$cHhm!?3!_LD2BKm*cj1@g}YSi56yzWropt<&o|9c~s1Ads*54a-tw;E3p61BI7 z(_RU9PG+TP8Dkhq#ULd@e=*NsACW8?9{OoV#;P@Meo!=%g!!EIPYN`Ye77`Ua_pCi1f@Vo_d7I-Mc_Y(<0tih}LBV(8?|*Jk`Q3-=BrN+^!V88R-2 z=6++`La4c7kYr;8%#MHX?)eFUa{-|a3Q1>ti<%)&*p3@pv1Mh0ilz!n!cynZ!>mCr z(|A6ka;&qUS8Pn3ian|nLz>1iP7ll+RwbCt))4?sydD5bf{*bWHo0F8&2U4c>kNOj zAxKG7!g#zoh@DQ{e-aIN%QHHe=_WE{Irxsa0AIdd`*h3m-+IuIwLpJQmj%h^CHWPw#Z*$93j2b~EJ?`VJiWpDS)0Y2?io=UStKbX z^W(TM^NQvZaRv)|Y{9$PqIws{fvKs%AAkJuf{U0ev2K54q)`XD;m8s3KzPjC?NT0D z_`$znSfm2&8c-dLVrNOiJU_g@SW1P>3b8bq)H(^kc 
zRjX5cx8!!1O(?2?1frgMz6HR%CHnzqONIef8u>BeL$BfZT_59JZ>psRZ|yN2UP*@ej6(#C)&H3QLG z=yBy9%REt>Cr2N=YHs8bz$nV_P?qYxetsY3Hlt2g)i7*B>XOtN2t3U3l4BO#ykSEd zTm&AFmNpLY+Q#BvGK5crBhho|zH-fzyty4D;cLjn0kXAVOE%I_6|z;Bih$G2YRkYH znCY+^HU*+m5)@Dd4vH`e^VMbV~z|GW>ow~KF@PZvMu)n3u`2u(E1gj%+ zs)d|Sb=XYtiOf8Rvr1-x>+LF}1~l+0btO+Ogc?eQ+R1z<1oP-C@|{w-F_%v# z>T8M%xw34IN}dt)Sm>UoxAM~ivCPkZytFpSq7I4kf8-&@3m19pBbslCg94#%77jyi zAsJR;5w6M=lL%ZE{?$y4@@(pK-}&CDnNH69)7LX1W>uuVlHCY(vriTXDO6rE@~ZZ0 zF1rkH=cwthUZm~o2Mroz_NamfA7jF=9N;miKE?l=KOSn6MxHWgZ^z-2kW66;A$ep7oldv6CZAVmUD9xU?|18glL-XhOp?!WQS6KzI(V?mU_*;# zLPaDNkIWzSZo^(}MvEwN739YAw%_hczL{*OVj81TAKaVpycO3A2TCj;JOvEmW=+u< zNCKP??>37+7B_LzD@pSjUP5;vbL9_mm&7&d3X$G3rh>gYoBwwVpNPeb-zI8c*S40h zFv7Fsf7G*%{3|7BIr(VK*JG`;ewNsP?fur&^?iaQ(qE^ngr`7Q0ht0Roa7nwIs#CO zk=V4)Yfyjj;*wT)2W~|ML}tWLfoJ5i{3H&(SrW^0wh+khw6Ts=R zg`9+fN>O0f;RSj`_D7|6JogQST?$d;YvXuGyShD!ii#TU?)VO`4?+w56cuwuB1pEK z@cVFlTAArb9#YW{Y~m!jvApx^9_5m>&Sldnc>nO>Lm4cJ#>CR$FrF2}9s@SIhX{v0 zC#HCdR8_{M(AUKzHC~1Hu4tYVA8$^rrgUMwW@nKRfkN1em9i>m`tt5@T|`$Q?584O z*rX{IF={#3!!QX7;G7q(4?dB-MmPDay4-4>4zUd33Q{+kIu!D-_)7adO|Q28z;719 zONx+SXSu_|6^fY?0kW`SYi6o#DA+P2e&V|Glq`J1#Elu{Jfd61MpMbj{5cdbp-?i4 z08$`MrMPaO_&{hs9M|!%z3*Tc?0Sm?(`Q?l%=6=TjxANS=zU|! z>&E?CS+Y}7xuFVb+`I?*!!p|`aGF7)NN>2+TMUD^!?OWDm{{3=YnfG(+z0 z#AoGaN}jiX$up5y<9)i}rz6*dAgB_W&h^EOpCwp9Z`Mz0YL+>T)Opvp*u#CY_{$D& z%ZlEKTQJAUO`Fq;Lbe@-wN(nncq>ag0nW|JHNhn5043DyQ%BLaM9RXx&oWso4FzuabWXNZHEjVd}a2@Ce43p)M!fL&W#ln zed@}_aq#(jV+H)(Xnmca1IfO}XJ?$g-*wb9<4)Fd066zM|Jq!aH%#Bvh1jI{Wrhwl_r{zi^lb0BNwc({pYFfc!juPo18-}YAtR&=Tvq(v@ypn z6Ly@m~eY97kEYN+E#tWE5nV=_Vl$uzpK~Hjy+g8 z>qb(-yKm{KeH}=_r`~-g!0Y6<^%xfr94x`R;>ecLj~LrQxKj1HNmhF)`|Gal`URWg zq`hjDT*3RZaWt8xyiKKtgtsQqrHUJJ{O?zsR z6?P}?Le}0qWC4Uq^%;NX&dIwOVG#CWE|g}WQLgTRNNd%mzn+_MZd>Q6LqeKdp+5-+ z9UgD-a925h&JT%`naV!f21`o&GnZyf_Dve%UDnrTiq!WyNUq% zZZcB+?>4h4khpcFS~YF+^YuqLT_iFOy0@5W-`DWOKK<|?P=}}2nK!lF@`M@6L@o2| z%pN~F5B*NB@xSu9pm8Er17kZ|QU1VY)we3vPdVGNU2pcqPL%So$xerE3@Qqo1!yy) z_wZrET<4Rnab6nNR?X zb{Rc1A|n#DF>hGEzFkAbvSTg%0&*#Cqhj=q6Sf1z z5>1krSaJB>?%lh?q!O{8=}b7rjI<~End$e+B@CLPSmshlO9eIGd@-2aq_x+sUF%eA zUAMcrTJU3|Q>8{xrCk?SF_Qp{Uw{+g1&1Tz2-!}o%{ygVW;TSmS-;g~1{_>ib5M%mbiP|5zZZbS>~ z1Iw~r^0DgtNjlCucqtyiQFP^>WT@vWw?Da zkEW>91!nb0R@4FAY;`Ee1Hr1E!G6tl?NA@JfQK75fI_EhEB*1u4#dZ4U31xhLX891 zBw4eK;jW?+wwe0)jN_YBz(;O9Fs}TVX$v3~xQNRWCu|3B8EiJ6>f; zFLAO9)&NeVWh@gxl3&(tk|A-zfV-NRonB{nFUy#HI)6y;J9$`9G0_iGT{u!k2Bmr{ zpqm<(ytjCYyORbCLqp&t=UQFoA)eFK);@6eh^(6`n=gB{PWSF(1Zg-iYm={g6E9+t z)};6C-1+9%Eql43!+p9AHlkI{kC+jJsHy3C_gD8bZ*@oE5p-wr%6c|u_EW;jFeI8M zN;g-Gb-`MLyj?jL8Eu**ksh(N9iuvHR3R)HC!e$X z6=uRPm?WG?GR8a2FLsc+x5?=}RAlPgwe~*lSQmA>K)ui-?}9oSd)Jx-Eo$P7QRe)^tmu}l&3#@5NGu9aP{I@Rg% zO}78IwLR7BfYn2l^cEN>i0;?frU^S#chiMRg0nIg2L#x!99>Ls``sQF?8hvXR2`Y? 
zeDy(VL#O^seZI{Rx6Afy@$Cr`>};%HWRT48Y7*U$y_%&p5_f_k05IfKe6QOTg?lQR zt{3M4PwFY__R2{xsBw?y8%00e<^IsQ)V)83wyXQQjp{h@%;`(LN_xzBct}hPNvrBN zv&iX7#p#e{bC%136gDOE70Dq}JDP3Wxzl<|;d3r@!r^RxNzda6%l{zLYH$T_oJy8@ zF-CElqZMyMsorlUIuldlchvgL0+COy)6glu@Q$3g;~bS=UF9;*`Q>NFV`c=xR$5*ODYN z)CPhPA><9_=;u2rPerwM@(`sa)b^7*O#0?&S?op9PS?715Gn9PyVD-t$JjegXR}; z%u7uFDco2hOY<@S`^eL;#iRkwKRlfK;-xpMjA<1wD0qF-XIsHs$iRAtImrf+ z03?#fz>$42gqO755+QL+f(+AEjaw-|qM76=07(QCHI7=i zoO({u^?A4sUh0W(CIoUxATzOliaK0cZcgOE>)xT4!MtOVmep^tK&Fyib36s`d%x@w zmtL{BgRN{m2TRpwWaZ&3od!q@Av@-Bq#*LI>tml5d4xEOP-v1nvNt1&(TacE?jv2rp!ph;c$l3o;5sQqCK$H*3^i+5Hv5vk{H<0x+zH zU8pn;$%#LKNi}C#h|#$+p_^M^*%To`{KVI9aRUcEAtU|}F!&6F`LgdF2TKs$sQ7w# zQ^Z3FXh-m(h>zv|vVdi=Z#ne{(paWJmZD4*N~~qSMb_YV>12FS)t)wfIRmM-WZxhvgG1O8o?uM{o0+?0T#;38IrQN!}08xv)cv2N-NlmEawr zn8dA1x{%%FxVTV$qwGtPT_m#%52AXB>fX}&5O?KYCLnY#kD0I&>LnLcJ{go(JP62% zbeT*fmjn;p&YU*HKqrtRlq)VO>c7wgOU2&2M~O!xiJbtJP*`q-F6ZTn$73Np+26l@j` zI}SN_`Cp@~)4C@QAaokVtQ+;1drQM$u5{=(EsuuCm&>P+N8|8 zrxeW?-t6&|e@$U#RY!t4D^F88X$f3l1*f^q-~!c^IE!0?;TnUJ3pdDIh7Ljh57IW| zO#>9Ri%O=IFo2K14_J;J65Jz3IrQOq9bpR}D~dU@9D$mp_T%}Gk!=|&A zSG^lA`RSxy_+ux-grZi=z6g9l3XdgPhmbPi`m@Wbzs>)&far8Wc|EI(D|rJ>HT7$c zsxSXk@w=F<#vc|7+i$=98GKd8ebl`69;g0vF<@dvu&I2H4AUO${JXR(Fa6p=C6Z4k8x_0H|}xx>~TaC+sk!iiV2GBdRhEOx6N zIVB&^V;!ZqL{>6&L%Ub6=(|CD=ca<@KU`m2jTtDvZ3|j0O0{6zCo-H&;VR}7Nfu*# zCu;e2#*&Ro=#7NON@XMJ0%SO&_YY|x;Fpk3(Fw9qn6GAtnNGAx_SL9ww0HvXCwygk zwf?FdK|j35xgvI{cu;TA5^>sT8_SWzOcoAjeMlF>S(^YDWaO!&KSKJ8R40_vAx~Qd zj~poa=bn-m^m6zc*E*RFUu3K=S=?rhd&?P|zzW%+?Kx}oW@XTbA?Xq=fgEI8J5QCU zrQOrMxZWyFqF8jmmm+Fm?eT$q+QzB%$i9H1E$^NyvP6aO+W|QAP+4_4+gAC)CiMB_ zG3AQl`|{<>I(mA{K>144KSH_<;e5*BNto5v#%XHE4Occmwt_J@qx4Y`gX#n3j z>Bv}3!KsFa%jmk;4C+Sp=#M<@nsjc)&zt;B(zlgm!qKO>>f*>5LCcg4^Brp7XD2vz z#nqu1LQs09m1gyC;7_M*{0wK1ma&wV^ZRt`W+{GgSX>-BGepUQi7VELR5cgo-3b&;lR; zQD+|6BZW@rAM%#IRP9}z*!_pdvHxbtST7@7A~X8Km2(j{quiC^MPTRH1sqIXrA1=3 ze3qFx`q^7Z`NC9HE)>o0Dzif>|3(DDg$VuY2jdwu&ckP)P<-vec`on>FD5>Z*I z7*&k~n$-B{w_P{`7-6@%9{H6 z;V!%S$Rk$$Wo?)|y{Dn`(?9LY?o(4pR3JF!no3>ww8m(tje(3AKtEvckahAds`78@ zKl@&07S$V9BP}>>i&bP(;NDw&9w{J=J-fCv)KC@u@W!F7@}1n|mvv~4N+-J!6CJ8Q z#Xn{qMFnhEmA+E4B$cBZKhD|q9dH=Qb_1ob2cYjWu3G*mulT+~Z*|aea;O~X-lcx! 
zaJV_sobp-5AJfwu6YYw%mV9s3mv%e?KF)41(Crd}3>CZoo@fVh9-Tc)2D$VY^N;>k z{q6i|UVnYJmX|JHwx{tFRV!5QT05?zz6@Q1svO0C`;2V<`6oW~dz;t0PPsJOD~dMj~>%EO~PIeu)c|PmoT-aQTR%T-o@^ zoJooRTIuM>M!tVkFFob6@o50jInQBpdTyndtxAYlWmi+$ASyk{M7mcU!X~e$qBm)N z@_9u4dTTDX#h^iRPG6TR*cTwewTDXEo!b3}+B(>}j&|18CC>zq<6&$cbO zI;I&D#>{n-j}tijNXPIt#z-X;uK7;~i&+~+yZfv(p2< zLssmI@4Mj3zjcb4V&60J{&zK}`+RwoysxuSi1@jW@|>|&=RGX>_Ro}N#g1@8?gCbx4DfJL; zA78UKOs}SvHGe+)KIn5_^8-p=jKx-dOD!tL-Z-`T)F;bN_up9gD)P_r<^7KtN17No zKYuN0`}dh%YO62LKKZ~#rW&iLWHCO&oRkBSn3z!aEXTz=eZhkL*2?gC>zNC$I)q7B zkblZQ2If@#2JK=Bh-qf4-Q}|%(#ik(rvQv1tLw{8;cg}6nTpEgPEaEc^SiGo^N2d7 z<~!+O{*PZQ9NESH{PWM>6`Wr-6TUa;t=vz~d2Y>C{&3a*sQqI!+3!1VyS@>86sW@W zcHS4?+Q0MM@yX|Dvm`)4+GbMcQfR_<9#=j$Ro`h|f9ILg7Z8w$Dnt_dmB0gVJQ6RX zXk|BX=SR#H$GWsq-n8+}8vbph8rVFI27eV;N%lPM-8edZwtMV4W$+-%#mSO<-qyH) zxQZp;)D-O=GYxJu%aq@LCuYeK71-IUh@I)%!t{dg#+>YtkSg2qxy!L94?WKje~L-V z;A^gLK$;xEeM)(s66tv5;>AJKApD1|p_IQAHKq`!Vu3|K|Lo8$`4v@P{eLu^6}0>+ zMYJGjkSQC<(=R={ti0l7ZS@Y8<|wx1t>NOmpspoPUf})WZpi~!B3nMR2=*A!+RATP zqeNYu@rw8#!>UF@y7Za0dYh{E2);>^_?0`CH~R-DHS9bk_FY?AhjDfY=UL1aoQo1l z@@z*RnKF3l+vPH)bx*&EqN|cH3`s&d<&nQHw6bwnCj7FlPHgHl|33>&ynp_3;@wJO zS@`S7FP^PPq}0(=pf6yv!U?F@&-@b6E-Kc3_a_MmGf2di>Prfte>$0U zYxzfhQBev)h5#xqfPOQ|T3=-IKkGz%^D{NQ3`r)5J{7FP3rsk1(HB3yWR3MGv)cC{ zVfc@yhX;9hO)I8dTjW$)WN)^sr-Xvsfe!-Ye4c~T`_orsnEV{IwoDjSP7r0~Rjtpp zLzaVENev5d6PACMcU82w;@x6FeMm&H@nWLjvm4ljX#N=jKLWYc@OxPzq4W~+U8n5y zL!1rGUwhuR7EC6VbX%#ei*pk)sG-AVDvj zRrYC%f$54LE_faFS6k~)9%kj3V+%5Ep|zep^?p1IRPpF#8##!c%xtC37H0B=9GAaj zCkw2!bU*H`=h&0H)f?XGy2pf~M|I-SJM^AEID`btv5+K5$ zY^$&UwxOMg@TrkrkUdStxVTBI3gN$^sE#$ha@8i%X+RS_>l<8N_M?8;&`&a?S*CD= z@L0OcjSgz}!zsPHujUEW^yG-7CPt8}`k=B^%;@RT_+gi$2qkMHfTAF@TJCUE>9k_Tt?JVu8#z84s$SZ2#WYl zMQsI>70@sFtX$58eZk$niJ)%F>s%Bwf+2H_U%+0>M@_hMq@LcU4IBJ{%A39u5wh9V zUk50TfyeFYx7Zdo(tWh2N?2z;_0gca);8an7&@9P3pMSp^SPq<8Sc7K`*%*oRmF>Q zPC;F^egao+u{RlbFiDA_=DyB*u=#)jCi@=YbXhM}1)AU3kLXGp-*s||5;p~zLE_E0 zm$_Gk@T2Z_TMw|!h+mF}djx!_#rW~t0eXVKi6SGbW?OOw_27a?jEtn`A-_AfB@X5o$8KVf`^>W3PK#NS$%+Di6jG(v8mCPhW*8AxBpZ6Lw&$H9U z2sHPyyWYtX?nBgv+-jMRK{aT3W}zqhTnMCz9L%YxZx*hf1soXLt&FEd_C!6~(+A{( zQdJ94hbRiLjMm`!vt|Pr$xa|71KX$^ zK^?5Z4#B~3O=g44{=RX`FT4bku?{a{x>`$y#mJyV%MgWZS_mUU;8?7R<`95)Ouj*M z<~rP{$UxJEpyNbX6JHu*_{-c?N1z{JX-;Y zTK?j1Jm4iUd2-Zi8jEmM)NUD3VDOTStUx|A3nJo|Bxl}#5m<;QMa0r7X$+qNAE6x!ZD1gH!EPcWTx;EoVS6p$*`>e zLCjm4(erBR>8yLmx%zV_g=bbruZ0mglqV|(f9A^i&GwPXp*FOn>RdAKNMZNug10lL zzk(D09(_(}(&F-|JNsW#F!h9sDxX>E7=Z_I2_(Ohq+Cur$=}&lOor4Fu9LQOGo_13}BNO|#ecjO!bo;;S=CIaKIM$V)!3q)a0W&ymJg$~x|@=luDN z?I+sZWq_IN-T*_H9(sHNBp%`7s*=5mBP(X#>oq`sgE`NqtkaNe9dVg4!AM@c_{Gf( zhlUy3Ww1pq-#i*JF@Gz*be2BtYcN+5vU8l?p&zR5VA$GGepcWwy=d_Dh7dL0|TpP@uaCl7m z+@wCW*4d;z^vGzi>RrUZ6?KK9qU_s?qGeY4snN{QLMS{RHG(Wvajck6B>;4dC~+{g z$=p4{;l{Es1@0U;XpojMCA@4%0-jJ}JU0^YauI@@rtiEKE)=5}k5N2fHk^C%v{FII z#iyVZmtV@Zb>v!3*?DXZ*Qj#~Y7zs9G@5nyuK^a%XV#7~Xj$~?ffXxk6n=Oi2~ds5 zG{BO)xw6Igbduc|TzZXV8Mk8E$p`W#aoI__m?Tn0M@O@tisU-u+X}=f9IIOV-X23o z4P)qcCIGk`>a_TZ01oQ|GQ8sLJqppbsi(vfMY5c7@Z+iv0YO0y{KSyMvP-kTj!n7j zUE`6dv2v0XiP<2oGPwt8>)_ZU`@~(z&Y^=Q9So6jJcWhKc%byQEthW#d)sFZXp=G` z_6)hYY`g(@=Y=X2b+B}0cc-pb_k3QR_2bl?j`(p;(ZC4z&}sZJagC-B8lPc=pGz|E z`EO7_;!lS$l=TSm@e821xDsj7yUe)YP+Z+cO`Fc>roj_DNSr$Oyoi(#`L!a^Uk^JoHFV zz`Oy^?j9YjBX2MWh$|=zJ+7PQV^>b4JfDw>2QDs*H>Dgx!7{db@os;caaWiBzHQXQ z>37a9Uc5hh66w*xous9=T%5@?mMvl?WPk3!Z4k8;Zs64m-)$>CV>))DNoG~9ci&{q-qF``*&+j@O7lx#BpGNW9SrKnRiA{tmST9t#wb?fHIZ1`lG$Gd+Z zo^`YSM44L?f0Nv48bdcyOiKz2cbX7Ddqttyuwg@Kp81Pw@(tpF!nQK)_;{^p-IxEd z>+~7Ql~=cr3}{VT6y(=qUlH}-`N@LYIt6wDpF^ z&u|_Pn%VtPg-K5c;mZrQpSAuWOh12j-@idijH=V+ufK$rf|Fc-z;G&$7(JT!r|S0h 
zbTtCB#5L7ql$(mv$H;|^mt1YY2-2}<`{sJvc3sG`7CZjAG_5&z!uPy>%LPQ#p0135 zLnb7CMK+-=S#P0KaBvP;>h_1(CjDu4xx9z1{Y zWG@urv-^MkJgvN!Au=wBb_fXxVN>&!{hCTAGkX2>-Jb0py_=8ZN?cRrOuIx!Agwp? zhE8|w2z!t!R5SZXTq|9tgmR7nu|!DT9lQ522&@{@r)jUH-feO6HLf3x_OJW&>ElLN zTk{8U#sYD>uXD~LyDbL3fCDVH%Qv1eL;F>HhyXj#nl^mLVgxg`#m>s8Lpkz9X2Q;o zcGk2+Z^7htY@_7go{#K@-IdBaGs=r?G9_k#`~LG&5&x)0?0*(SwJHmC4$1R(d4(2XSol_?Cz z9c{i%%9cir6d9fjw+6#CwqVH-i|R5n1M3)a7Gni69*r}5B2o|}y#HkY8^_G<6V}ZF zy`J99<>jNZ{q9V48WA1_N(Ux85@a=M_D^}6qO8{#XxTo@H_A2B&DeV&!A;ZH*7#-_wbH1~2YL__OpU++_Ei1c9=nA7g4Gj(N%rd$EI`i{)`aZbq ztbK3ieOD(Yk;zda5q-~6`UTWbDUV|aCnRNp`wsNZ%})**R9MWV>UWbRL5tL&Wy+_^lZH z&LmCf*jVPi4QWX`rgb8_O<7X%tdr?MC^@sDy|7oN92~7%t5z*_cl$e=L77Vef;${F z@lt}5+Z82cgKpPp=zrko@IM1LUoo1h=A?D>j9=uymR4&PXG~goXw!rhCDHXaw5I82 z7&s%T`^Mc0RY}Y`w&$pLejvs=cdp2t=jQHiMYfS8Jp7`^-<(^GF($}5_)?NyS)KbY z+rJ1OB9-RHcN2}bSrs*z=kt~T=D0|k8)}cD%qn)bfd0y;IEVbcagM>|Oq%o9S738Z za>RqwdAlk|%7)h`_|+>;b?Mip&ACV~qEFZYRyAvKbdSnMIzA0MjrgmSK5HM?4NEF# z|7{-8$&zL=>G?LUU5=>Z`CDIvomK7xqZ25__uO>-3vIde4po{!$Bs=lJaADRe1wNk zdW>H4UTVAkWasKyH2U?)zm85nijT2GuhvGbve5-BM&bLR`^*Xr+ub$57 zusY82hjlvcRttuWvwG1u=%K=8*f^8qEZ@xWeS^}DTHac((?es_!^ECJhwPT0KGBJ9 zztLUmiTmA`hsHQAeGpl?Rqt%`6WuhDGYx~%ws{R3H!fk;S*_x&1s6MKUSB(vEzrfC zVr8eob(lqV*-rDGhOdTIFe;GH)?WbTxn08NO9<(iQ44CScn_Vt<)`?EzLRc`&lzXc z(R5pGRFiIV`_Dc)+%PNf*Y-mL6DPVJ-L5K*!n8%4m=yfx>*5M`xSaV5f3nl%44DGV zYPj;^!n$o<(R67j@>(R9wAa)$+*t(PvuJN@VwitY*soIUr=3B6&CQLnK8dD$6!vSe zMK=$>ie=Q%bLF9xTiKdJICxbbFf8uEjQTHFOMAO+@uM#nP3f7Z&Gzi{ywztxyNKzX z`hg_%2(sMNW$v~|6(u@}@66u@rSLT{)wxrLgrvx3k2+ahz;g2_sG3@fU$Y}(p07cA z7!k~z9KjqfH=SSr6(4ilrqk%(8ohp`dCfml9Xh2L-W%~^+8*Qc>PM4~nQskjrS~Yz zf74A2KUTe;XJ#}G>b$>Ac6PQ^ajCRV#Cb(NV*b^_lSyalUywcx8M(#->%Mabyq%`z zvf!nI-``Ur1z+MS<@)z)OQN`>^DFD%q~Bx@=@09w6tvUxB9JLP*Udx zHI|W=$sr(4qi%ZE_BGE*ddK7oe`>6MmwP8^t6PRB`FL(Zlr4db{HDYCJ>hhhe&LxK zb#r-myEmVX=sAcTxcj3tb=Rk7BLA3EU->b%Jik?;lTenOGi_p3$L#;tPV|hoQ$2yU z&u(d~;$I{oBF<$T%~q~ml4pW?bej!|kV|5R^SDkx3+B}o(icyT zOFUx(qO7r96(sBBU%uzT!|E9XZ37)$I68bHLl(j!5FdPdfLc(|WxYTav2*WDH+n%y zzjykr_^V(uJ`adHc3A48cdy^{=?S~9P@$^j7yx`RF3IAFU(cvk$}6ALsp@@m<+rOI zz5SnS<#98>d;@X%s5-8EO%B4e;|#{OrIqcHw-^j2*7x1@+vt%qa4SaFE8hO{0g!0N zN3-;;BmHT})D7*Boj8|aqr1}iqZe$&+6ULmU$SLB{#iSE$VSl=l|5n!JvRL+5sAkZ{3Gaz}5{IIs4(v7qD# z=I_5GcMUO#VB8d}k>hgLaZ{BgKmBpGpci?-=Uy!+dNDa|H~HE}QY@Q7giBVp^J30x zX&!7WOaUv_J`k&qrKRPW2dhZJ0RV|Q(^sEYjM+EKrA1*u()Dd-&ACS~S*ztC5#O_U z1r1xLJI+V1J~V&BI%CeUF)&uwudL{1ZchwG-d2 zxJdxH1ZydLiC+3OV~}m*j$H!_i)Fa9yCh=eu5uen0$YVO8+ZcKKh+_ z^X5&BldYLuM|=B=u>h)|43;mvFb~-}dzFaeS5W1&Jzb`Gn>PQMF^va#ZcGxabDn^p zB+i|{9j3Z(-oB>{{h&LEIp?(ym1N`-q#D3J><=m$1Emi60g$oz1n-W3mBl zf6ePhwnb2i^Wjss-knBH>5zWvxbtGz$SA$ypQq^=celT#>38jP^=53909S&^;|BYQ z4xd)yaWK_iN5bR*n{A}KnVvBeLb`Fc8BU4 zwx9PIKobGvaJ*ned$0^JRYNs#he? z6q9Bzm|N^riLpsbE2zjygL6j1E|O)bV=_uOne5rxW0f;8nNg5<#3s;Wvqtl_sEf$U z+1mTg0X$65c777cL>HvyxjGP_szr&+>P`+$QjaOe5VOV4>bZI^Xe-a>#{kr0U&&Z? 
zIIdW)u3N@z(3rl)1A@4i&;R`Hb6UMyf@;(Cfgd+93?j|Vc>&uPVUJw{Z?mztFx3=F zi^G{#Zqm9}W5TSne|3zP04RXU9TpdlAJl$bg6zhwTi8sHe-UTXP_;Y}1+*>lLuuRu zmOUtZCP8ju(Zx^XRBNd7$9r73h5JVc|Lk<;X8XU%K17KlGrQO~bowikAPm<>cXemX z)q3)x9T-e|3h+Sa-WO!dC$p{(ffM$OP*YiAEG=8crEFC6;(8Df;Lcwcfc0qD{nnJR zBH)YS-J?+Cj+lx+M1U3!{1F_%xcsb`EGfqiiAKijteWh&j2~El9*Kzqzxh3(_s@~= zO#p$$&@wVD9Mz&0Q~WrtCF2O2qiP1|MA5<9wHH9W$r#>i+qw?5c%D)!uY-Q_fM{I7 z!f6=h&G@Y;a!cOx!y0j_-+#Y>9YTVZ;4A@~#z;yY7D>s|M{79sfi$sCFRk`mCzvCX#bb~}m@|^kil9Kz9&_w&bE(n0Oz}Mgy5F8XkJMuxCMS;) z9}pvg*XFzIxbj3ME{mp)VVS?wvv@vz$4AY=R1f3^!h#AHDA^E$WyA{nLVX$zZ>q3I3@8Xh_BBG*y)OYyS_Mn!ERhFx+d zLg-0!K|jbMl9c_mchWpEd~jryZjRB>DxFNY(!1-0%y=!6pJeKpD9IE` zpW@ky@@PRkIgDO_@7zB+Z4Dt^5|*O$4xRR^2GZxStIA05i(YICg0n-xTXkD^dCC>{ z`7>0O%zLLE&Qoj8k7{IUX* z-0ChuCqwJ+z&wE!t;*01SV9F!bRr#F<|*5STy7;bLNeE*BO+rHK2Rudx_vx|;#bH8 zKDu6Lz>@WiF56sj|HqasU;R33@yO{KFHj-U-+6=Cle zJL1YzvL<8`1#ZCVA>$C>DcZ-K5d6RI$D&bK9gDr+Wz@y1m&%=}7vuGI<(_?KvMNfF z7Fz}lYbNZBY)58XlBtCF zjE4#;$E(3x_7&9!fnhx7evE@-)d$e|A7l**@;LIL>S61sTk#eYugoQV-B#e|_ClcY zWC{g~z7!ZaP~!o->7%{b{=>F)$pOXP_;#BgJ&_bmFs9)WNZg{U88DG#ok>IGD!{_c zts*Xw>{600Wd5o&7(8slN?PhNG)+b*DOpd*FDThtRjel)p5&|XYr^^X^Ha+@7<-rQ z9szkTK4oc`Sf?x@3)#9r5z&(B3sHjB>y!4b|LVy{dS$O&%pqvjERNaCh}wmmS6Pxf zj=7-(t7Ycq?#QjL9e9Q8$>Ti|f}Hm3GZ^rVV(sy;D?f%+g;yZ8vRA6|_NF9^@6pH6 zzNi0zY9+w^VYP9)T)$ta9E_M=5_Qe%bo@jST|flToIKA-<{70TOfg#n<4c zdEUEd)jmBK5L>9*v|%*~Ha>S}e8AJXr1=U(9`Sp>zX_=X=eHbc&#PX}@={8ax$)F^<=VoTL zB&bzY^)@nwy&MY%v7EAc51Q{?Cff&$xwsm}{ms>|P^5W;3+OPgf>TNY5%A1z%bkZX zZv!`ov}SprvzZT{e9S(%2oznOBc6G;F<0b0nXIO1bA0tsTX~byf|q{2rJ_jR%WHqB zf*QUfdlILcB7oup7<+ez6h|T+IHUtHjWapTkrI`2$+Ghpr7~8217CHoQl8R7`O#j( zZCgUbTAw5`H>o(EA3K27f;k|!AtKg9C<9po%^_S7hl*-)r-9b)zco>nG+b*4MkEcUT2 zMQ5K^%c11GKOWCvJ_9+TW;SiyNFmYR3&JT-EkLQ%TI@v6-<(1s83Wx6(anUkk+gQZ zMk~D-@*;f2ucGIcYDH(R=bwkh)f2hi4CD(6O)iz(5Z?8CN)2W9F8jY69&2B%YS7~O zA*B$}w-M|HFrCPaB7x2Sb`2*T9}k`M<-^yJgsQx?EvY|ca2Z>&WP6IlH6x0Y+CDTo za@O#?goSSokj@1J;b2##oYUlgD{^(8{tEr1=sM`r{C4cf__iLy*Kh7LnkyOxIdb6S zRg=m}OLNG%y@{x1=V-mktD*c~?Im0i%_}^Sd3jNKGbwTo#&g?IvGFv-pc3G4mn6key#a_X=tSw%Wys3g1Q%Z zaZUy|w`TGxCL7`KJ+TK5vCoA@%(7+HOxS)Y8(_Jb*-jgJmS1_7nzWQA(fNtW@e& zzKY6{-jPK7W7Vb=N(Lydn|6ZRE;}94r&oU_RDGNL(SNcL`yjDz!lS}UQt8x7TSQ|h ze3%GCpl&bMQJM|4XNj9=)k({6TLVGaJ8EhUXk>ID^>bGB<5#a{Tmz%RLiD`k~zhu1YDP?u}A8C{+j_o@31Qng<1(Bg}a z{_OmNq*7)euy)E4;4PH)G{N#(1DHV|o3$+MK1M{p7!Su{K-;J|~bL zJjCX(7smnn$AA@auZV`V9riaXsO}SX&${Xzepf(yj|;d%YDW6xFHbGyt|K}>P1H;W ze-A(F^kkW)WTXbAez_FAud~NPeUSWphg}F5#A$H28RQ=S@&|1KDF9OrR_lA@(^%JG z`>PLeLXB%l$gnLLpSts3L8wd0hrmq0B2Jx0%fse>48GoOqu>0RN55io$~r<-933&Q zF^!`PQiY+GloJDXfrWmi-`A^`ObvgGsZ+p90{6mrpZ*n5pH8$ z$E1r(wM7i0n%yVM(c|8E*VD$${@9>aac@KCV&!`e^u1;$65p@-iCAz;rhVz$)wU5U z1O!q~M`Tx@zRuN2v#y-CQuTjnZ~JE@bu3d^JDwX&!JEaiT=#4Dr?RT;ACgu1%zyR! 
zoW~^(V0e!dMW`o8s}o!4ibb!G=8aveoRLxWUY1;NLb}Y8T7;_1kwcD)Ke+kwN{5}* z6v0zkOdAzBqNbIf#f(ZA zQ5`;A5}raePGOP5;WD1ltp(f25W40q6kBX}nIzJ0#(@6m3-d%JXT-xNswxx`vJJ6% zuHWzT>AO@^e%;2J!1k(Te9$`Ts9n0m60ulx2#U62QMN#Wg@-J^nom1P!6p6)xY@j7as z4NG6x%UF6&G+Odd(VEJs6EUxzd@Pw%QEhnoXH72$fv%YuQp}(8CIC z6EW}0g+A?azRSBw7$bva@d_zqm+kUdv@ooK=!WW(Ar?reh0wt+g zc;e}=zHnTssMYK-{gpQ40EY=J>b@uPK9-CkBqw2eqt{}T;~zUPKK<*r7ETH*)(zJK zI?|qwN?59mF$dyC_tR!i`r3x+)zTV^TwC+Xg9i_eukgS0xj6W8h43=eE<8dq;&S>c zF*a3ysr|w+(i;>@5daOP+p%Nfxacp7=F68@8q&eH@_76W=zY$LCbs76+7ZgDRsAa7 zOQrskI9t&vi#dhtXOTpXqRQ@C75Y&5&F!4sZ?7YSaz>h-RoG6dr69?|q9PWwyu+A7 z+G{RbJT~N#Aq$)8yrnKbkW$- zaQiKBj)y!UO_T37v zFAsC;Xr8veK!ZgZ&6mcXILLH$h9j3!-U=$#zx%QN=>_G-k)YPD{_)?$ZFOZx@;6~7 zgk@-!Pg)WBZzH4%Bco2S=C0gi`}=G?3e&6Y3F&IkH<*nt3R}_Ozh>NQn~~*#h>*XN z_s3Vkvt!$6n>yfDnvp?j;>4zC0H!$R$i@4CSoA%@TONTHSWR zf3*%(zB3j7XHz+Doc5xdSB9sPcOy|M(r!?MpRAJ9GyWhKj8-)IlIslr?|V> zd&*3P{JAH%c2<6yg2lDvR_WTRdYem3qg;cQBj&KD$`pwj76UjCh`;DK!>z!6+Zs>K@@H60JeF8W%LYPPfwy>aWH8b)1=-;2N*`$ z>!_sOg==*ahISTg5x1g7E9^F+Uz03*DktOdWM#`0fmFUq52POWwE0cMx1>VddvXNA zG8^q=rH>(Yv-?BOq13vPp{|=yU1jmlw7c zW%h7XBPa#Yd}ND=k{V{`xb9&; zVrab0;z2oWlW~ORI$?$Slkrrmy%3YS(`>BGZ@TS#v7~=_7Z)g-3*P?UiR!=SJkZEa z@2L!3VLUeA0l-fv0wReLqLtsz^AftSqyYHAl7OXouOA);f6>OY$AKwBJ77?P2wT6t zO{?aT?7kHr=F*PEM^=#SZk<0zRS| z^8N*3(8%`aw)Zl3yo$8!)wmk;&Lw zp{2MB?R%X3&^~=!Om* zUv9YMg$Y0)=5YU5$o!%gdntE$*7l$KI31P8T#vIy=iWa09o6c*$kwEmcWe14W2^u; z6kVndiFX{gHmz?V|7K6_lY9j5W#Qr_a8KSQ!^+QKQ@5;ahMfGa)roK z?)3lB_U3Upr|%oLSb)DCF9_Mi!r(GUNo>1yT z3<8V~!TKDQJm6!?!tVk1WuAk9!H&SdKsi>ys`j0Itx_uLP?}ikdNTN2e1h7QIhlsj z{vMZk4hUVk=oF=ew=_T{h+3K4_O=MY=sMDADv=49DU99R`kAV0E)Nuc0j}Y9vuB5JcOS6~2M! z0GVI3wTp)KQ*X!h8~bU@78L--%Zun7iHxfGk!6_YGVcOpVmT&rZC`ebGT#$G{MR1X z8Rlad4^O6``4pVwpdpXVmNQ?-2+|LQ<}s(ly(+a^o-Uv zYg-MuV9s?LGW^2?P?-pgo8I2iCZjgU!1@Ab$gz$=U-fOtEYZ#1TvZIG9=#B`yD_tU%v^WV0Ac+)%j8M!~WY*way2o_jX;jn6GIn!I=x^~WV=kZ_{FIbC zeE}8Y^4Pil9logIxX5d7YLm^5#<*Hk22O+(F-SLhxS?1k@frB`xfJ!o`77yXxL#Lo ztg0TfVKk>@o`W#ad5))cl*9HxZ{d;fec!(7(=vZ}{+hFO_Mx0kR~%MavNsppnXIp$ z@gtIW8eJW?VzVsSj?^b|@oUL-7FG|N1!UrbuAoaw2a8pF7LNB>h?g6cIx40Cls( z`nRhP-G#w37bqPcHtP|7h%adb?BB4EynkHg4_qXHeQ&s~-8TydeLNBWfknegs8sj) zuDZpsB)zZ#4{m*pd=hfCBnysNfej$*gkjRLYvy}bZNE3|G6qk*ndL-{tC>+A^>$R} zcI~WYPHJ-B7Nld)QcP`A*6vgamO*^PR<}JpgF>!+Q%JZI`Mt`*Id!|;b2cO#;EMTt zqR({Hxo*axSl~<_YoN!Qmm{XY^EEw{9;BV=-@}8}7#*h>KRdmO&`;eUm}W0Qke8HLRyXujDGcV>tOJCK_9fV_IH=gz+T zQI~_6TmDCXe?1LMxIVP5~2i@u$9Jp5Bk#;01t#W#KX#d@|&k3#>|N4>fN1u$+ zb3io6qwnyd`7`(D2#?!m#^5#RH}~p`vPzrj#3B+6UCGeMBxIH+W&^YDmxBsBQF!c{ znJSFl7q6gjoeJ$V8p-QGL?-J4Jg@k#JI$)6oSL#Hy_SYG7ADz{T-EeM0zGiWll$LS z9{WSF^iLp0)C=fY!wGkNMDi7njJ(ZepEldEi37Ql)=!vxn~PQ0#rbM~Bw`udFTLVQ zbVFVbJvqrDM@x&3#921MTX;t@>TRVL^0K3h;JQ0QADVDm$e=OJ-i%xY<;-zlfAcAV z4uDE@dU0@K!0+&U3r`&-4?@52JLU=rdLEz&CbwSAn~E$X??ij~R5M~2{2^m}is!FelG$5QzBNFF?RYqO0tmycu* zA8CHm=U3(23#&|~CRjbp7(ieBvW||`em&bGs}hq{7JBHSeZ^i6dP(uANqVn?B5Ev7 zVPfHR+u(Kook{6oD$2&b8OtYzJaz2-WwKArX&?8SN4-51jQnYBywqYm3-lE~SUOWT zRIV~LH8pl_M=!nJ?!m@-Lqk)~?9e~of3}x}!@d5h*lV9U=Iu5yB;WC}YpW3HSdvoj z$LO>{U~78f!f2Q>q}G7)M)#Exo7f#Cxps*ScDH_(@aSFAa>*qS_3xahNuP5F-XN1# z!Y9e!zdbMCvF8xg4w;A8yz|@LPy|;V$sGI66j*eRGKkL&&9k9Ft6R2i?Q-aR7MAl;_#*tH2wV&kH!16lx6s9PXqK9uPXFkW&y-@*WT}R(feWk7(Uqqh`1DmCj1Li2 zlDv^jXi!*+$Dwv~bKsB3Wv(k$ln`4qpRBl&GL{+=^XW-&kxKxgZy&c&gTQC7Ja|hGp zM@i$&&(IrEyJWwz-`-v?=O`ed`=zA;7VC!d_2gg7_}4J*>cRA0Uz%?fga~n-_$zXN zxRP@;Kk4k^D>EnIe^Px@CeC`=_RA(0d#yScl~!YmtM8I#+&V{eDr z5t^pawq`QgkHdAF3XTQc@jHYIim zl*NNqRxRL?`=)1)89%<9bn*tZsmj8)$K8P2Cm(L# z+WWi(8Ef308wn=LBUTtvCR}zZ3w@*Xu&{FH^3?V02NF(UR+T!%{<41kdNRoa+@Ua1 zjf{-MJfHK6v@2wx--_VmXSoDilmO^*{c@ea^=2?bv=p 
zcC`}DyP!$ne`61)Ph?q3e}$e}7APi+=?Sz<>~8hI2zB1W&37!fr%ZZY(aicres90} zNvry_eQZ^sAFZ`1FmP7lq()SETRVV-m#XFpyY@aD~%%_lcOw&>c<)FKhdkBkFM`m&f@woF`O zBpRIYaA&~JSj^=76WxVm2&q>^dt!WWyYQ@;B{N~)EDW0Q9N2B!cR$|1gy=5nQZlPu z8}?sJBmAKwa23Ec#TL)jW6iR5KlP$T1!5%Vx;hT2QTqf*-0l%w1@m)77t>&?Qqx>X64g%=~%Rk z(EV?fnTk-WHg4X0gILOl%ipuT$5gU*htw7?9366e)8f1 zWB+g7u00MbQpm$b50$3iG|~TNc-o{Nda7?C-I{${;W!IJg6KIt(dN+H@Tg8hjj6@F zR7W}Na5vkQ8lxGJp2vnKtq%q)?5XDX<(fy6K9|_Blp|nxdQp#!TJ7i{3B_u&Yvhp5 zZ}5#tYSEKC>}vnq)#|t*oy+$?gu8FDvsL=JB8zSZA5~7}E%wbZ-sN}SzAZer7t3$j zUC26~WBcD5Z4V!HriPj>Qu(qQmlFO2b}N1rtRnLxD3aqyD;_*D`snO(h1Lgmg_-ap z`})@T^qqAxcDGZcmhM`!vVHv^b=uzX07HDHliz3IWT&~xk=gyB=DLx|o_Y}W++hEo z37=CSP<|)Xu?>fgm>U%ANjr5*E``g$jtj~9PEZFF#BtaaYLByj_<;C4(9#YNLRZ-% zG;+w;1-f1~Qs65mYBpPSB}aF7*WfqVQR@rZe6!YlE7<)k^CAUAN1q}rNXD(V9`R%~ zq_E#%o0ZD&C}7ZtSxe)_vcNJq27U)&P|OSE?h9{H4ojD@0Zo27Rr@$TJ%5Z0L7m;F z_L4_aS5fKYcGYpc;ZrxRQO)x)4^Z{fh!b8w8SP>x-TbmBLtH4DE_I~(EhwuO52l;5 zmN~6R9I9JSHg*WPwf6yV6am+aEW7`3(}RN~T=FzNZsGTv;Pi7QYpOX0Y~aXZGRdA1 ziwCv?iMluMvtMoK7~8*8jKvc(zetlq}Y1M7yz?^U~iheGB2S@Zu)tMIH(Xgh5@9UflW^uB-2 z!AF``9(}EvM*Y$JjwR=7QU&+7*ni!bp}L;zFT9sJzKF7HoicLMD7-zM)t%lq)M5K< z%CpV6W|dYH{tSve6w*9MOkt8LU`h<{vutOeoR^C%#X!ZBB?JqloAKm5!$(SnFG8E#UJ)zZj_-!u<-QA>)!s=l7uiY=9N(L3}!ib%}x z&^KFDFVQ%u=FBdB^@dU(Cwk?Aw`5R`w#IjYJ)ehsDMAFK!PLxyvx^;#mE%$#15W^O z?)1^#be>-EvC@#Xk&sqyq&z1)4qSFGVMNSnME8thwZ2nHOGglpnSFDtez}zBlYH`_ zez`4z27sR>cY+5l_C7u)rHJh_aB;?gj{|iZpa0As`0zsmvaCVV-9sRv>s=o4Q7wb- zrIe@FGHOrHU9#y|0HcGCqjrQb?!6Q~0@lIK^ zvGAlAB0R>>o8Im@o4{^sHg?htx`YKa=a*yhtFreRQ|B->y()o5~0UwBX);y^gPWyX=X@5+D_ zKYi5%;hR3@9%(xNzHujg;feYD4|N>8Se3wFa^l4|(m;>>ZhPN4H-f~hQ-_*69B zyBw=8;pFpS~Lkte9`<_>a+CUmYvLBPmqv3@o^otG+78A*$LKk=t) zB=|TRKAdxpfhbwjsy@hCX$saA>(yLpaeKaUN-9Rzt1D=ux2O~f zp8k2|dqeZMln4$NY^I&OUXyq4Cm5SwpK+BfqGEAr6BarW5fzRfR9RvxD3lAXb#sq4 zE-o%YmphESCCup#w#Eb3#sjr;H>Hl zrsj~h82|-Mr+Y0j1yGTcYp3!#8*GVs`(s%NewMc2&^eE zcF;uh$rvhZ;3P(1QsCni@Ic12Si&XHFecEGAx?Qs3q-875JnhW4m#yY_Omka>ipwk zwZ<^5Uq#6`A_lFYxA=&mzL3@}r#i^Wa{s4i$!5gtNG$0yBY^BSX;$NOTSnUEeE=k- zlj%4s84pB*A_N$BS7iE~1lEGsIpSE-OOIJiDg8sIkNFOE(RWHUe1{ zGPse+gH=zK{sgahJ09xL zxWHhEA0Q`w+&dD}&Rx!!_?e_UC@l>Wk3Xp=OpeY})nnvp5U5{(MK173nXEqYu}R1F z^Vp^EtlyC!zE^HkPYj)V97ovMJBxTl z=}J>fqh9^`ZG$rqvJuOwJsfj7uL^faS6`$y-09vV(_jWd`{&Wg zmks>;Ll$&>tTf0EBN(Ts_2aw<<+qE5 zkK>t3e~9$EbH|Y|59aqVHQFZ69H=@=^god6J8mVUPvD*69~=)lN`fwH2JGGr57cS! z{fybO7=>`1G7GVbj_V@iyBDJeZdB1hHYr1u1Z@@^f>ag*vdfO89Zch3dr9;Wj)qtr zffv(jH2i6I1D;<4>?kw|>WpdVe5cdu=S~`I9Z~fgZ?tGwug_{o{7pISi7Ok2!^M=2*N&U~PC(>OGhHbERXA`@f|DdWKoFUsHmjxHPIlw)LFqq}j|Dz^&##BFGT7t%rMxq7wpC2;_ zA@bBZ=}J}ZN;j3nT>J!7+Llkd(`0pm$jF3d1a+D?v4nc90uN=B zs7S&?y_z@a6$DswHTEuXx1Gs*9^7i1wedNhSk7)5I7sVfv&f`PbUmHSn~b_A=Zy@_ zLXMC{GP*gLxr+}@{oar^p;Sui#~2%U1>Pc`GfqzHsA=QYWYp7Krp%x&d-McmU7ZN4 z+D-qo$*AiesF`3!=Djy&|0l%>yL--!jZAYA1kgBni`UgRpX?(Lg%y0RP= zE2a!f024z^Ld2$lH)(Fw;Cn3Lmir=6IUyn8N}qNgysgsq(n+6tS91&sUCa04{Ox2& z4EG*1|H2n!s_}-$S_9$!_I3Aa1`ak580izbJ`AwhxZnJP7c%_x@x}g!2*qUi!`b=n zP_Tr+&S@e+Lfm5sNY0Kt%1xe6E}nx1^{$@3=?yz+$KF=!f*3#p%9b8C_67s@4@BS&8BwxH;?^4 z+jhu{+!fx)M{G8&PpbL?(rPbhebMwVgZq(Gu(C#bfoB4N)|FF24uOsz%8p_`Ynxit zEJv6kGX9C%b`r~aDRtpLU6Xj|>-+r7=sS0d7$hSqORBq5>|5x-o8m{v&CC9o8p+J~ z!NO-x9Guj8o#TL3)uTP3ar6__uKzOL_o^{yi5X}VPH)Gg*|HU18!VIvI&-S39&GiL z7u`4>%2qs|JiKv++K zF~J+YHvH#=1A7-I#ey`v!-ZvBUZ7wqe>ktnb$5olm0Ch|6O>T!rRf9a+EL2O?C?o))! 
z;jl~ap0AKx{nN(&ALOhCC1%41-#g)-l>NtLiuHN6!%UMX$~WdUG_>z}P5r5{+k9tF}J|!YkA8cO?ccKxMG{i!55y|u$KK|aWLVm7Sn|F+9AJ_kd zLZ)#921kQX1~)rD8;ZTPo4VI=i|mz z_kURq4fk{Jr}I}fg2o?S3}bKCtZB>8Wr@Js|9xyv@n7qc97oEB_k8Got`K3zzW?)o z9rW?n+Yo*Kw*ro%FP@Ln+bDa#ZA6{&mStvXS*qQ-5&=HIF5h47*_AWqd=UDZG~RRl z)0CpALtg)?AVzj_{{@vNxm7j+;)C7rL8|z06%KvFM~;^9Av6C|EAZj1`} z0%#|6v5d6_<{7|{;+fJmc_dp$6A!@P|_z&qZuRlgvf2K}AAg+GDZA~)a-EOu>cm#@se2^sGfte|@0k0yMV`Z;lOE&yjs1_KaPammYfjd&$WyvVo6y{ZAsru>jWd{6}jc;D# z<>f!4WrP!LT;A>Xa;PgI0a%+@lQ6s^caD+Ak5|$ zZk#PLD6x)F!HsX0P{QrUs%BH;iieKgU(-YMeW9sZgQViQI^U}pLkQulGR_(RCT)cq zz(7ZBhXY!vW7Kx|_#A+BtxxH0WNT~dRZMe9eij(Q8iI~sTVe;RpsmuiK`a?Gcc5L< zM%6(E#zRtuwli(4&mwR^Xa>_bv;k^#UXj4FqX{1%wJ;b7j+ATHuP+D95$3ImfVYZC z0E~~&Vf=eUuQNArM;Dep`9#DI06wNnMLBOF&2t=$L{PyWvHcJ%0F}k@NlRNc8#NEo z`#5$+lHwoB>|VGK!8nqkiVZZh&-g5eIEsgKY|;fLoDXK)t0np<|Ve zUeD930?DWd4aFCSVqeTxAX!GGzBhI-{WNR3o;Aa)>y`WlNw#^^>xTZdchbh z%^RqUFNQnaIIMxJ_8dDtqmOI^8MOvOelj=3!6dFBP&7G94%=;twE7etoAoT5X0}6P z&4DZAql2wwJgRC^_#Ks#n7KaQ+P?qydF$3Z!qsIu(d0qzmLa#qmz;CA(+gdk$J{!# z(r^JMvjK}~RsXoj>HreGg`46FB{9hv(OVJu{_C;6XxONT51I0-#`fGA`RvJ)6sMU> z)Es+1X5AWi(_vjQF)Goqcyb-s6r|*XQddI-6T%G6jSgP@ML+vLPVV@)oI92Q%rCJ^ zW*Hd9=wr;)EJbA`DE`9Nn8AuR_s}6VH&J0|q16UyL@W9^U zDpQB!3mj`)AGcH9%VGv8mTBJr`*xmoIxu*}WuIu!K;6e#AdqTw8Vm>tav~$AYOHwD zeAGMc|HFPa36%~BXUw+BeEF4KG6mbEds&J%(M{pC!*yh@3hm<*ojKM{y18GoOA^yt zDi1aE7J0r3gqE@ZXGSjA z(5lObLj*5*po9;x6cCgWNO()<;U)~o^;Kbvxmfp@L_-0ZU&IcF9aJXz zCt6pf{l;-PiW^hilk=@#BdP%&4wF}cW-Kr}V!W!O-^IMp1zm?%d5x-JIgl*;vcq8K zLRRjw>QR6Edk5zAkHRF(#!n`NMEt3 znMPklpg6Ib-R_w;`W#vQ!CNKo++J>`W(x>*2$1iu{=^5{Q~*2hD|rSa2aJNgluSCaQ6TFFTn%C-vEdDtLGBFn^*; zQn*5c9Y_|^3_(7;;NLrFS(e}~qH*I=u(zOHr-*vE>+UFjMx2yR7?ANmi=-AJuPY;m zNK{A^v@EX}y1Kf06+>zhj85W#bTL^h2U(f7d@K|@>NJDw#f-+Q72bu*Fh1(AlyKdkw3lu`{l2WdnnGLKJ(YW`hD1z4qD?NAh+5Y+%WV+wh{7{a_Bp1)q=ofvnSbO{TcQ`r~?&~ z!$#QdvMylFB>5Di3}B0#M$ObJ?R)R#g(+Wm1{Nelukm=9|G5mv$+PEa4g}wl<(>cZ zNN}FaMH9a*V4ua-b&O+@__DcH0JE?HtAPF%>o`RM3Wn>_QXtX09q)2Xv{roF#~XdE zClF1=-h^8+Y{VY;jPx0W1>P!ck_;k)<{L))Ww>ou);k;rnUJ&#drcYsFgp7WyrBWJ zzE7^|@veBkP7bK>Z0vO8z!lG9LRc(rqlP`S&~>02)W8fc!ST-+8cFln`_UG~6}weSA?y)d?=32@oBZ#bx1cK#S^I-)_*ZVnw~W@-E{r5HG@8^I}W)M3~c9cj>El+{jC zrvI{e-6u?fkl8P=KuuT$B1$6tvVqP{=TaJFe7wW#;lh>Et6;S(F(0d;CxR|qe}sdn z$8nW~f|RE5ar3}7B*=##UM{(pV8%N0Q$b9roxP(tuE>#qDmg8~)>RiqXy##k&X}z6 zxY)eLDNl%?bXKJVkWP?zn@ROwI}(o`g3V0QH({wk+K+%-c8Lxe=Je@Fz6?h!(7!9w zl@WYK$&IB`Un)(*0fC?FaV=95K9+-v=IiVz&QpO*iP`wD_733*t(`M}-ycCMC+5`A zw-RXy9CDf4bcojwjw)fAezr6Z;TVgrw!#KwvQZMVRiKgyAwcA4GSr;oMzp_hf2|Yz zWS0Pu$i%L94B(;IJzwma~Zmp|(=3^`MUK^oF{D?%L|6>tJh|3Ce z&duMH!gl2u{(b6?)ga|q1U{S=hv%M26h#&33r^m1j4bbU8n{Sda|eIpIt^OzRVogV zN0;IhEsl=hs?6dUWdC3cD|RBsfreeZoYNe-GN_5`#MyDO^kqO0MU*5-JE=G#15#Nr zWc4l|3UwW|l?Ym5?Jd~JQvhA53d93hM&XFT2Ju+5#xkr64@#vn_fxlzpxRVSh$AZ% z%{`fhPP*C0k+wu1|9SahB;VQP4%CWGYgpj~=^lD=eq;Try720S2Sr6YEmqB)n;gCm zDbu0#U0hT49T#{TSbH1fPN6$W%FJU)b9vrbX|@9T)wB=61Ifkh;fp@F1c^t$g`K9c zuKxzY#7QD)AJ6_}d?lYm=A4I-uqb!}+{E?8QFEtv_>X}-Z?=1|FN7`BmVq#gs`x<| zdQO)sIniyZORkGs-%{+K&C53-#pX7jTwZDfR&9e%tq@?(4;D2J$`7TE9jym;Ua@MG zjL0^9yG}vgk(f!NGmKtY+@8IO7{ZQx^W;YYr@`XIi-ogE!R&95%f`b2suXA*B z#@0!3N`k!`t*da7+2FyIFvKgFqOwbpT8_jSly>$j-d2TshFnpf#g_R|WS#V-^}K?^ z9<^*U9vun_uPtGDo3MFTjMLnHjulr1weJR^>VqW4m#t5JyYf+=t0O}nMRsf@ffTyZ zbgn{c&u|Fb(#^IoTO;yzb~MY~BhdmwR#P$tPVP-O`tHNGv3)v=bTNV<>6WPILDiW~ z%U;yC#$Hyq+#W_(#E)(-__d4^7{W?FS)p{x6iEdQ;D^Jm!X+S(4jG~#%Sls5*<<97 zqemlv>xCskDHO5L8=7tF&vX#F#*gREi5LxiY&cA&Bn!Ia8Dtz0+;+Xowr(Q05ieS| z<$vH+Tksqwvx&?0oN9wR zZ~wPCMAbZ$2c!hr*$d}s41uk^KVQfmV(i5)caF{>4Y)onAlqniuTzRHj&0LrUlg9- 
z_5eLR`@|%;IVJnMRhK&5-N~423KbEt%%$s}%F%ZYKlAXSLKv(=l|FXp>pNI`UHA8J$C!}Bb-vfSPt=~Z+ai0POvqOSqRPGe-cRp{#EY{| z1|T~!fyrU$M|)VPW98$B?|sh&6mTB$q?77W53bPM2+2ZQ`59MVLls^Ido&C7iK0Y9 zFKJ5Axjo6{V*iDCFXTLDPdr?4hAKORSGni^b$2zjOY@Spyh%zRnq9r@ z29dyV$u_makF={^ycn{?T+Pw74s(YNFDm0JM^ZQq3-b3Ha3)x8J02OyD{Ki~O+6+f zJC@TDFZXQ)^=>O%U9G9~IFWr-gh@t)D&Aq$D7EA&1L}K7^qS#e0Rw~VuFsS9z*#cm zhi`XJwigQx5Jpqf@eb~~0tXlZEg_2Yr{JPaRn!IW9C!MxtZ_G4V>Hivi3uQ+ZJ`4+ z3y2^_dul0*2K!oI@~u7jnx6`D{_GY_b7N&Y>g-`5SmQD*##`VgS_ac7pYW6jtW>lR z$qBc0vK%(56?%i+Ke_XiMqKxo%G{ zethhw$KrzlbxaZjQ8xoB3C+iC?hbMC>X`hRluY}mT6aw#&{n2R+FeLa9uGIZczT5+ z8#!rn6R2f=*MDO;irxV#jX7C~5mfZ+H;3J0eI5oF8kSeV`Zz}WxZ|YOvQpR)Rqkm- z@S=hO$FWXiU&E58<>oMRY6jP|Rz;@jnGSSbDs>wNOKE`B9+_>YC{fVUm)b3KrQx1+ za7tGa_&#|ZHH#lvT;}`@;YxMVi`Azc+AHabsEy&!1|k=o{BzzyxOeVh!qnZ+zk_%ROmkk|uLWC(+@Nn+kF)bLJ9jV`lWB9MRunaVkFEhSQ`B z>_=rWk8BwAaztvH(oLSOgkJs2w!g0;8aH>_(!s3e*)wBGHj$*uDTH8eT=(1}Oj)dh zR-A$6(0owf*^|~d`Hjnzd3j=#J?t8lb4d$2P12<>N}B#f4$CL=<&d6mEM8tsji7!Rx8oWeF2@m`)U9R% z2AnCrn`u0)V*a9~>%;7VujGwAs`j+XE8T1vf}gR|uU^@2YIFj)+rif2*`c2%o~i8I zbx(QLQwB~cRR+AO9fLiV!<6)#MQzHb8xP)krD!<*@`W>(O)DCvuU>Gjzjmqrh$ZbD zZ+IN-Vr>6*gkHdwbw0Hh$aq7+RFva+s9>txF(KkfY_P&%nt>egpM~V$y z!QYi5Lz`dq&fehvh{N3U{)S3o)wj2n8~QtiL=71-;^qA{O_ype9NcBW?CodZGrI-0 zB1IPOcM#e#=_^c_&~KNrN{8}=1_3=|4fazUNZu&Yt_2GYkZpFpx8_lYOBFgu*L>|) zu~Jy0V}4FkvAncrE0m#ct!F<1^XU*16}&m!V5z}!r9zk5$0J046+(6{K^+E=eS7`XlV>w=MgEReCxF1JRRB2yw?yEfxn zXeFI<26sNd9w+Aznv+D0ecBg%qs}a}Trl1HGXlPwX?oxg%~XYZaR)!(w~}dFV$^ff z7f}j#UYZxPEYEFOW7lByo!?8FbhtC3SNnGD{4~>Xb-HfrH)LBz=**EQ#tzg6j=SA~ zbE2Py)gT=^%mWkOR2cvT^g7>drgKd!)SRr%B(ox|Ck>QIvy;2dvdk4)Hiel`%a+%@ zc9L1)FfdIxJg@-QYO6Wtj*qL>H(!1AlleVFn8vA}-_nT7U-Y^}*i7@DT}jjT(>O|y zw~XHcrb*{zbEq$HqyK>|Rr+z3&M45{IV6Oz0-hofet zbS#V3=|K~EeXf$4(HOoS@Y&$jT}K8j$^3z=ZMU`IQx5Z8tuj9MT()z6rH-C+KjKm+pY#oJh;s7t?PoFXBy;Gv?78ZS!!SMipqN-&q^GW@fs5= zBno{|x37EnD;)&k^m%7kmVg#V84tQACIntxbga!?9@bGSMDq~`RE$ek7YzW2%Jz!E z(UaB=dG_?_@l+2>T6UeU9x~oPuI2jjKEs}AUpX>sWWRof#m5c|nx)$A>AGqs zoHef~vi|NywK-RZHXo^8YPH*{XWgTkM?(+$SzO)~knA&Z)sH4lcPA(N_yzZzMOE7> zM(vBducNC9{B3a{?UU@|WpR1t9A)FzdDYSBbFXbx(^59R$t(I?dl!#>=l6|zJwyNnE1n4aLK_~Q)-NXQ`a({iFUio&qO)LDH71aLv%hu3O z78o*vVC9Mx?XAYlIpc9R-{Kx^wbS@GILFYlJ|s1%?lL&T%6oR(#1D2RDth_0o!~-p z)*@y$yh%J#F?3ku^}NW|ar6&gVFylfQncdvx+9;pXA#SArBX^U0eS#L2RS~R$eSHv z>Y6l$q$x<9*r_}?@SL;$cjFB_7W9a6yLCCu;p-t&H21X4ePiV`uIlI^zy{FsWqpRr z>ysIjP!b%k4gOOCnb@jEd-jU0@{jCP9l0=9Y5PdkK&{+iwRgOpm-eyp?fyq+r76&5 z?aox}Jsx#z3XgE&;$bOi%f?N#8{ch0@S;NsXj2$_-h51U?4 zQ386KG~jEatM@A3*bWmO`0LvJdqu4|e|}n>*PmymOne$NDEMQuFuny|{oOlL<-hRi z6x{&LE}K1day`F_*{#}5b773H>oBXU?VVJjKH4h=>n%Sgofo7-oc?WSoD9QV3!)j- z{9)*Mq}tb31XXmXYI+yl3l%1g3!)GOpUdW+J3YAYRvhrG-Y#;u~p6JvG3z7nY?(s}ZW}hQ(B-Y#{d<q}aJ!r#>YgIS=-BGcojaob6bl9EaZxS^99XdGPV}X#Ie7!Lwz?C#9bBf= zYAr|X<-7*yQ_;u28{IS~El)S%<>v3V0D+y?I{(r5pn!Qz?AB>n_4w%OJI~I)C=Z2Q zNwGr(8x(Cq26?ghtj#TRJr8M4KkU=Lrs}A8qk^ZZt1apMS`c@V99D&89fItr6nVLL* z_=IN^7~voXF$w8^Ud6K8B5Ct=7LN?{*2|zIn{7V*Cb_}x+gF|cIE6)5Xl3T+kngV; z5tjGoC*@e!I}f))J28}JgQK7H;d@li%%aL5#8kwy$qnEEqPyfde+-|?r8o?;2BSOXX{=m$BK?NVAS_b z`e%MLpoZZV9sQX0zU*~3qovc2%(x!wR*=-v__H)PmX_v^fu7Ev+Y+R4&hRVgZx78p z$-T%KtUO_H>eRw97;jZGSbEWg-@c~Bvw>ndE5$Pfev%zVBi z{=kN)%F`7tH!7+}F1(oYW}#mx!%f;--Qciam}}Rq=*_ATnlcwAsfhPo=o+^--Jx4Z zUxRt^BgQPfx6HpgR$Xl))C*MNxf*@9?aXx=R(Ir9;%C&yF0pdPEG>Pt z@!P(K$t-PfEOS2giXF6bUT#pLFJM=Hjf3eweN?QZZgM5oZTPX^E?<3h-!72($kf3d zvw?5!*~Yj;AHV!Ou5*Q!uJ{HfEwu7dWjLx&!2ArkF6TA(rKhyGb}9Np`<$Hg6BW)e znYBJUc`n)~cQ1KsJGY(MlAHd^E$e)XPLeOmxD)RKfM&g6kdS{3_OQ_R?Gr}+cxR`o zrCQH3lYrlygQr^EtAt6z+>_p>C%?0SIDo|<{ z9K9xnz!ayc5nNa}=(nv~vwt7EY2wOzpebRbKS?!|p;o6R4YQhKkn45?=eA7u?;YV* 
z6){w7+P}OR8Q{WE@@qzXEQLN>>B`266ZV?8B1G+!h~g4=4GSQMGV4M9Xzf3gi`WOi zs|(1jwu9nO`$)K}$-XY+bn!KbegP;%|;E8(WJb0xZRcfhq*DCyNp2K>cIYif+1(DXX% z6kqcigO2M;s6h;|W~B6?pg8EX`h>b{*wnyGUfedZQXmA}wuF;L zk+A!k#lwr!Z<@|(vJp{4u}%;-He+L#Sg%KqfPG-(8i@v=H`kwBCV2_=eBs1-{seyl zVef#cAzM;27Sy87$GKcXIkWajoABL&SMFvuMMCxOCH@JX!*Jxtnb1T>4Q2#XhYl33 z%c&ubc~?(C#wKtZlAM5mSOZfMF)dgOKTF5ILuk9;HFlh>8@~diOl;8@HcEd-2Hmsq zNmE{0gktcg^fP2%L47NFBfZ+gal4CI?mGSYxuzG1W|d-P=eecDcva$2?Y%Q*j148u z%pbrSa{d`T8**V)<;gVWl^hlf8kI_I&Fqy$xHbL>cp&8#o@us4^A6|r_bnBM>8D!H zU*VJRMU_{FMo21E-or!O_=3@naKebFkt2SPdIZZ&gM`R^%-`k=m5>*1g>cUV6iFLk zHN~?KNM_p5t)(PZ=6lLC2c5p%C*QXs2E!_054Tl<$6XZi6%`9*j-jcGkC=FTTn1N&dMY=f2T3k|F~*Q3Dqdvn z5#w@1b;AVJNmenwEP9hdOf{4#&#b&o0GyzAnbgH+Y+ZkxZ9|Z66&V~#FYZ(+FAET*7J~T*ac@Wu_Ufc>1g!{H>EU}oj=%pP}c``I@UJD*`n~L zN%KW02M$SKYv{a7kORCYapCg};;wN%*`O+KFSRU@<(g@ed?$$7+yNoJ6RBfsM`fUD^^hh|X=HW_{_{O-w!ep&MBq zGez|;EhriO2wf?AI+|LZjIq)nhQ1g(FIK|qu`&pcDQh%6`afE1v zI^c*M#1}A9h%Xe3?yOfy*Mv=q(DXN|vwM?}O&7M{--)pKU-%vhNOu%3BC0N(5Ru3>7Ec_HJG`SgQeb!$sD* zzT$Zs#J_1X|3`4geecJ7@BF&zm`ZzH%xls_Wk0g3Z-Lm7u=CtwT2T*w9kDGQiO$W5 zvT#ltbx~41PWOdW)8d``3C=h zL1edacFl3_Ne%1UKDLPoJJhz#SKL$1mH(jQgCp2h`6ubWdK_&_Z1KWBOV3t#a0m{V zPKjhdDJx0>cm`z5U;4i~ke4}{|4+uR&bJ92|HZEG-Y Date: Tue, 12 Nov 2024 11:16:22 -0800 Subject: [PATCH 115/138] Fix mypy errors (#38487) --- .../azure/ai/projects/operations/_patch.py | 12 ++++++------ .../async_samples/sample_connections_async.py | 10 +++++----- .../samples/connections/sample_connections.py | 11 ++++++----- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index d57e1eaff98d..dd2f4b5e975c 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -521,9 +521,9 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: try: from azure.ai.inference.tracing import AIInferenceInstrumentor # type: ignore - instrumentor = AIInferenceInstrumentor() - if not instrumentor.is_instrumented(): - instrumentor.instrument() + inference_instrumentor = AIInferenceInstrumentor() + if not inference_instrumentor.is_instrumented(): + inference_instrumentor.instrument() except ModuleNotFoundError: logger.warning( "Could not call `AIInferenceInstrumentor().instrument()` since `azure-ai-inference` is not installed" @@ -532,9 +532,9 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: try: from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor - instrumentor = AIAgentsInstrumentor() - if not instrumentor.is_instrumented(): - instrumentor.instrument() + agents_instrumentor = AIAgentsInstrumentor() + if not agents_instrumentor.is_instrumented(): + agents_instrumentor.instrument() except Exception as exc: logger.warning("Could not call `AIAgentsInstrumentor().instrument()`", exc_info=exc) diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 9807df48cb06..dacd454ba3ba 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -104,7 +104,7 @@ async def sample_connections_async() -> None: else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - 
response = await aoai_client.chat.completions.create( + aoai_response = await aoai_client.chat.completions.create( model=model_deployment_name, messages=[ { @@ -114,13 +114,12 @@ async def sample_connections_async() -> None: ], ) await aoai_client.close() - print(response.choices[0].message.content) + print(aoai_response.choices[0].message.content) elif connection.connection_type == ConnectionType.AZURE_AI_SERVICES: from azure.ai.inference.aio import ChatCompletionsClient from azure.ai.inference.models import UserMessage - from azure.core.credentials_async import AsyncTokenCredential if connection.authentication_type == AuthenticationType.API_KEY: print("====> Creating ChatCompletionsClient using API key authentication") @@ -130,6 +129,7 @@ async def sample_connections_async() -> None: endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "") ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: + from azure.core.credentials_async import AsyncTokenCredential # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") inference_client = ChatCompletionsClient( @@ -138,11 +138,11 @@ async def sample_connections_async() -> None: else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = await inference_client.complete( + inference_response = await inference_client.complete( model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] ) await inference_client.close() - print(response.choices[0].message.content) + print(inference_response.choices[0].message.content) async def main(): diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index d4db78b2ccac..151a2f624b8b 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -99,7 +99,7 @@ else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = aoai_client.chat.completions.create( + aoai_response = aoai_client.chat.completions.create( model=model_deployment_name, messages=[ { @@ -109,7 +109,7 @@ ], ) aoai_client.close() - print(response.choices[0].message.content) + print(aoai_response.choices[0].message.content) elif connection.connection_type == ConnectionType.SERVERLESS: @@ -124,16 +124,17 @@ endpoint=connection.endpoint_url, credential=AzureKeyCredential(connection.key or "") ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: + from azure.core.credentials import TokenCredential # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") inference_client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential=connection.token_credential + endpoint=connection.endpoint_url, credential= cast(TokenCredential, connection.token_credential) ) else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") - response = inference_client.complete( + inference_response = inference_client.complete( model=model_deployment_name, messages=[UserMessage(content="How many feet are in a mile?")] ) inference_client.close() - print(response.choices[0].message.content) + print(inference_response.choices[0].message.content) From 4cd7be7978868613b37386d556b3501d68d33f69 Mon Sep 17 00:00:00 2001 From: Darren Cohen 
<39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:28:37 -0800 Subject: [PATCH 116/138] Run black --config ../../../eng/black-pyproject.toml --- ...tore_batch_enterprise_file_search_async.py | 26 +- .../samples/agents/sample_agents_basics.py | 4 +- .../async_samples/sample_connections_async.py | 1 + .../samples/connections/sample_connections.py | 3 +- .../async_samples/sample_evaluations_async.py | 8 +- .../sample_evaluations_schedules.py | 4 +- .../tests/agents/test_agents_client.py | 242 +++++------------- .../tests/agents/test_agents_client_async.py | 213 ++++----------- 8 files changed, 127 insertions(+), 374 deletions(-) diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py index 0fe1e6f5a9e0..88f6db207adf 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_vector_store_batch_enterprise_file_search_async.py @@ -49,20 +49,14 @@ async def main(): asset_identifier=asset_uri, asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - vector_store = await project_client.agents.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await project_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") print(f"Created vector store, vector store ID: {vector_store.id}") # add the file to the vector store or you can supply file ids in the vector store creation - vector_store_file_batch = ( - await project_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=[ds] - ) - ) - print( - f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}" + vector_store_file_batch = await project_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=[ds] ) + print(f"Created vector store file batch, vector store file batch ID: {vector_store_file_batch.id}") # create a file search tool file_search_tool = FileSearchTool(vector_store_ids=[vector_store.id]) @@ -87,15 +81,11 @@ async def main(): ) print(f"Created message, message ID: {message.id}") - run = await project_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") file_search_tool.remove_vector_store(vector_store.id) - print( - f"Removed vector store from file search, vector store ID: {vector_store.id}" - ) + print(f"Removed vector store from file search, vector store ID: {vector_store.id}") await project_client.agents.update_agent( assistant_id=agent.id, @@ -114,9 +104,7 @@ async def main(): ) print(f"Created message, message ID: {message.id}") - run = await project_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Created run, run ID: {run.id}") await project_client.agents.delete_vector_store(vector_store.id) diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py index 
5bc6b98a221b..1e2820c30cf8 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_basics.py @@ -54,9 +54,7 @@ print(f"Created thread, thread ID: {thread.id}") # [START create_message] - message = project_client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = project_client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") # [END create_message] print(f"Created message, message ID: {message.id}") diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index dacd454ba3ba..79e54f4a4532 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -130,6 +130,7 @@ async def sample_connections_async() -> None: ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: from azure.core.credentials_async import AsyncTokenCredential + # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") inference_client = ChatCompletionsClient( diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index 151a2f624b8b..abff4010a031 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -125,10 +125,11 @@ ) elif connection.authentication_type == AuthenticationType.ENTRA_ID: from azure.core.credentials import TokenCredential + # MaaS models do not yet support EntraID auth print("====> Creating ChatCompletionsClient using Entra ID authentication") inference_client = ChatCompletionsClient( - endpoint=connection.endpoint_url, credential= cast(TokenCredential, connection.token_credential) + endpoint=connection.endpoint_url, credential=cast(TokenCredential, connection.token_credential) ) else: raise ValueError(f"Authentication type {connection.authentication_type} not supported.") diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py index 09c251e93429..3bc70fd04c9e 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/async_samples/sample_evaluations_async.py @@ -43,9 +43,7 @@ async def main(): # Upload data for evaluation data_id, _ = project_client.upload_file("./data/evaluate_test_data.jsonl") - default_connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI - ) + default_connection = await project_client.connections.get_default(connection_type=ConnectionType.AZURE_OPEN_AI) deployment_name = "<>" api_version = "<>" @@ -82,9 +80,7 @@ async def main(): evaluation_response = await project_client.evaluations.create(evaluation) # Get evaluation - get_evaluation_response = await project_client.evaluations.get( - evaluation_response.id - ) + get_evaluation_response = await project_client.evaluations.get(evaluation_response.id) print("----------------------------------------------------------------") print("Created evaluation, evaluation ID: 
", get_evaluation_response.id) diff --git a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py index a9e1b18da935..4919ba692243 100644 --- a/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py +++ b/sdk/ai/azure-ai-projects/samples/evaluations/sample_evaluations_schedules.py @@ -61,9 +61,7 @@ def main(): tags=tags, ) - evaluation_schedule = ai_client.evaluations.create_or_replace_schedule( - name, evaluation_schedule - ) + evaluation_schedule = ai_client.evaluations.create_or_replace_schedule(name, evaluation_schedule) print(evaluation_schedule.provisioning_state) print(evaluation_schedule) diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py index 38830e92068b..490ee6433614 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client.py @@ -132,9 +132,7 @@ def create_client(self, **kwargs): def _get_data_file(self) -> str: """Return the test file name.""" - return os.path.join( - os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md" - ) + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ @@ -189,16 +187,12 @@ def test_create_update_delete_agent(self, **kwargs): print("Created client") # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) # update agent - agent = client.agents.update_agent( - agent.id, name="my-agent2", instructions="You are helpful agent" - ) + agent = client.agents.update_agent(agent.id, name="my-agent2", instructions="You are helpful agent") assert agent.name == "my-agent2" # delete agent and close client @@ -227,13 +221,8 @@ def test_create_agent_with_tools(self, **kwargs): assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert ( - agent.tools[0]["function"]["name"] - == functions.definitions[0]["function"]["name"] - ) - print( - "Tool successfully submitted:", functions.definitions[0]["function"]["name"] - ) + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete agent and close client client.agents.delete_agent(agent.id) @@ -261,13 +250,8 @@ def test_create_agent_with_tools_and_resources(self, **kwargs): assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert ( - agent.tools[0]["function"]["name"] - == functions.definitions[0]["function"]["name"] - ) - print( - "Tool successfully submitted:", functions.definitions[0]["function"]["name"] - ) + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete agent and close client client.agents.delete_agent(agent.id) @@ -282,9 +266,7 @@ def test_update_agent(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", 
name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id # update agent and confirm changes went through @@ -344,9 +326,7 @@ def test_create_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -370,9 +350,7 @@ def test_get_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -471,9 +449,7 @@ def test_create_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -483,9 +459,7 @@ def test_create_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) @@ -503,9 +477,7 @@ def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -515,19 +487,13 @@ def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) - message2 = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me another joke" - ) + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") assert message2.id print("Created message, message ID", message2.id) - message3 = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a third joke" - ) + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") assert message3.id print("Created message, message ID", message3.id) @@ -545,9 +511,7 @@ def test_list_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = 
client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -562,29 +526,21 @@ def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message1 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message1.id print("Created message, message ID", message1.id) messages1 = client.agents.list_messages(thread_id=thread.id) assert messages1.data.__len__() == 1 assert messages1.data[0].id == message1.id - message2 = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me another joke" - ) + message2 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me another joke") assert message2.id print("Created message, message ID", message2.id) messages2 = client.agents.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 - assert ( - messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - ) + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - message3 = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a third joke" - ) + message3 = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a third joke") assert message3.id print("Created message, message ID", message3.id) messages3 = client.agents.list_messages(thread_id=thread.id) @@ -609,9 +565,7 @@ def test_get_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -621,9 +575,7 @@ def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) @@ -688,9 +640,7 @@ def test_create_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -718,9 +668,7 @@ def test_get_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -755,9 +703,7 @@ def test_run_status(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = 
client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -767,9 +713,7 @@ def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) @@ -917,9 +861,7 @@ def test_submit_tool_outputs_to_run(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, what time is it?" - ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) @@ -930,13 +872,8 @@ def test_submit_tool_outputs_to_run(self, **kwargs): # check that tools are uploaded assert run.tools - assert ( - run.tools[0]["function"]["name"] - == functions.definitions[0]["function"]["name"] - ) - print( - "Tool successfully submitted:", functions.definitions[0]["function"]["name"] - ) + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # check status assert run.status in [ @@ -954,10 +891,7 @@ def test_submit_tool_outputs_to_run(self, **kwargs): run = client.agents.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed - if ( - run.status == "requires_action" - and run.required_action.submit_tool_outputs - ): + if run.status == "requires_action" and run.required_action.submit_tool_outputs: print("Requires action: submit tool outputs") tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -968,9 +902,7 @@ def test_submit_tool_outputs_to_run(self, **kwargs): break # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls( - tool_calls - ) # TODO issue somewhere here + tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: client.agents.submit_tool_outputs_to_run( @@ -1052,9 +984,7 @@ def test_create_thread_and_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1106,9 +1036,7 @@ def test_list_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1118,9 +1046,7 @@ def test_list_run_step(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, what time is it?" 
- ) + message = client.agents.create_message(thread_id=thread.id, role="user", content="Hello, what time is it?") assert message.id print("Created message, message ID", message.id) @@ -1167,9 +1093,7 @@ def test_get_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1217,9 +1141,7 @@ def test_get_run_step(self, **kwargs): steps = client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = client.agents.get_run_step( - thread_id=thread.id, run_id=run.id, step_id=step.id - ) + get_step = client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete agent and close client @@ -1350,9 +1272,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert agent.id - thread = ai_client.agents.create_thread( - tool_resources=ToolResources(file_search=fs) - ) + thread = ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message message = ai_client.agents.create_message( @@ -1360,9 +1280,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1373,9 +1291,7 @@ def test_vector_store_threads_file_search_azure(self, **kwargs): @recorded_by_proxy def test_create_vector_store_add_file_file_id(self, **kwargs): """Test adding single file to vector store withn file ID.""" - self._do_test_create_vector_store_add_file( - file_path=self._get_data_file(), **kwargs - ) + self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs) @agentClientPreparer() # @pytest.markp("The CreateVectorStoreFile API is not supported yet.") @@ -1400,9 +1316,7 @@ def _do_test_create_vector_store_add_file(self, **kwargs): asset_type="uri_asset", ) ] - vector_store = ai_client.agents.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id vector_store_file = ai_client.agents.create_vector_store_file( vector_store_id=vector_store.id, data_sources=ds, file_id=file_id @@ -1414,9 +1328,7 @@ def _do_test_create_vector_store_add_file(self, **kwargs): @recorded_by_proxy def test_create_vector_store_batch_file_ids(self, **kwargs): """Test adding multiple files to vector store with file IDs.""" - self._do_test_create_vector_store_batch( - file_path=self._get_data_file(), **kwargs - ) + self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs) @agentClientPreparer() # @pytest.markp("The CreateFileBatch API is not supported yet.") @@ -1443,14 +1355,10 @@ def _do_test_create_vector_store_batch(self, **kwargs): asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = ai_client.agents.create_vector_store_and_poll( - file_ids=[], 
name="sample_vector_store" - ) + vector_store = ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = ( - ai_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids - ) + vector_store_file_batch = ai_client.agents.create_vector_store_file_batch_and_poll( + vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id self._test_file_search(ai_client, vector_store, file_id) @@ -1481,9 +1389,7 @@ def _test_file_search( ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) ai_client.agents.delete_vector_store(vector_store.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) @@ -1545,9 +1451,7 @@ def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." self._remove_file_maybe(file_id, ai_client) ai_client.agents.delete_agent(agent.id) @@ -1570,9 +1474,7 @@ def test_create_assistant_with_interpreter_azure(self, **kwargs): @recorded_by_proxy def test_create_assistant_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - self._do_test_create_assistant_with_interpreter( - file_path=self._get_data_file(), **kwargs - ) + self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) def _do_test_create_assistant_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1583,9 +1485,7 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll( - file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS - ) + file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) assert file.id, "The file was not uploaded." file_id = file.id @@ -1612,16 +1512,12 @@ def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." 
self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" ai_client.agents.delete_agent(agent.id) - assert len( - ai_client.agents.list_messages(thread_id=thread.id) - ), "No messages were created" + assert len(ai_client.agents.list_messages(thread_id=thread.id)), "No messages were created" @agentClientPreparer() @pytest.mark.skip("The API is not supported yet.") @@ -1638,9 +1534,7 @@ def test_create_thread_with_interpreter_azure(self, **kwargs): @recorded_by_proxy def test_create_thread_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - self._do_test_create_thread_with_interpreter( - file_path=self._get_data_file(), **kwargs - ) + self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) def _do_test_create_thread_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1651,9 +1545,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): file_id = None if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll( - file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS - ) + file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) assert file.id, "The file was not uploaded." file_id = file.id @@ -1679,9 +1571,7 @@ def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1729,9 +1619,7 @@ def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert message.id, "The message was not created." 
- run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1753,9 +1641,7 @@ def test_create_attachment_in_thread_azure(self, **kwargs): @recorded_by_proxy def test_create_attachment_in_thread_file_ids(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - self._do_test_create_attachment_in_thread_azure( - file_path=self._get_data_file(), **kwargs - ) + self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) def _do_test_create_attachment_in_thread_azure(self, **kwargs): # create client @@ -1790,9 +1676,7 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): thread = ai_client.agents.create_thread(messages=[message]) assert thread.id - run = ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1802,9 +1686,7 @@ def _do_test_create_attachment_in_thread_azure(self, **kwargs): def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> str: """Return file id if kwargs has file path.""" if "file_path" in kwargs: - file = ai_client.agents.upload_file_and_poll( - file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS - ) + file = ai_client.agents.upload_file_and_poll(file_path=kwargs["file_path"], purpose=FilePurpose.AGENTS) assert file.id, "The file was not uploaded." 
return file.id return None @@ -1857,9 +1739,7 @@ def test_code_interpreter_and_save_file(self, **kwargs): print(f"Created message, message ID: {message.id}") # create run - run = client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) print(f"Run finished with status: {run.status}") # delete file @@ -1878,9 +1758,7 @@ def test_code_interpreter_and_save_file(self, **kwargs): file_id = file_path_annotation.file_path.file_id print(f"Image File ID: {file_path_annotation.file_path.file_id}") temp_file_path = os.path.join(temp_dir, "output.png") - client.agents.save_file( - file_id=file_id, file_name="output.png", target_dir=temp_dir - ) + client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir) output_file_exist = os.path.exists(temp_file_path) assert output_file_exist diff --git a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py index 693aca2dbb6d..f62d4995204c 100644 --- a/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py +++ b/sdk/ai/azure-ai-projects/tests/agents/test_agents_client_async.py @@ -126,9 +126,7 @@ def create_client(self, **kwargs): def _get_data_file(self) -> str: """Return the test file name.""" - return os.path.join( - os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md" - ) + return os.path.join(os.path.dirname(os.path.dirname(__file__)), "test_data", "product_info_1.md") # for debugging purposes: if a test fails and its agent has not been deleted, it will continue to show up in the agents list """ @@ -183,9 +181,7 @@ async def test_create_delete_agent(self, **kwargs): print("Created client") # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -215,13 +211,8 @@ async def test_create_agent_with_tools(self, **kwargs): assert agent.id print("Created agent, agent ID", agent.id) assert agent.tools - assert ( - agent.tools[0]["function"]["name"] - == functions.definitions[0]["function"]["name"] - ) - print( - "Tool successfully submitted:", functions.definitions[0]["function"]["name"] - ) + assert agent.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # delete agent and close client await client.agents.delete_agent(agent.id) @@ -236,9 +227,7 @@ async def test_update_agent(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id # update agent and confirm changes went through @@ -298,9 +287,7 @@ async def test_create_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) 
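test_code_interpreter_and_save_file then harvests files the run generated. That retrieval step, isolated, with `ai_client` and `thread` as in the sketches above; the `file_path_annotations` property on the returned message list is an assumption implied by the test's loop:

```python
import os
import tempfile

# Walk the run's messages for generated files and save them locally.
messages = ai_client.agents.list_messages(thread_id=thread.id)
with tempfile.TemporaryDirectory() as temp_dir:
    for file_path_annotation in messages.file_path_annotations:
        file_id = file_path_annotation.file_path.file_id
        ai_client.agents.save_file(file_id=file_id, file_name="output.png", target_dir=temp_dir)
        assert os.path.exists(os.path.join(temp_dir, "output.png"))
```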
@@ -324,9 +311,7 @@ async def test_get_thread(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -425,9 +410,7 @@ async def test_create_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -437,9 +420,7 @@ async def test_create_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = await client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) @@ -457,9 +438,7 @@ async def test_create_multiple_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -469,9 +448,7 @@ async def test_create_multiple_messages(self, **kwargs): print("Created thread, thread ID", thread.id) # create messages - message = await client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) message2 = await client.agents.create_message( @@ -499,9 +476,7 @@ async def test_list_messages(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -516,9 +491,7 @@ async def test_list_messages(self, **kwargs): assert messages0.data.__len__() == 0 # create messages and check message list for each one - message1 = await client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message1 = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message1.id print("Created message, message ID", message1.id) messages1 = await client.agents.list_messages(thread_id=thread.id) @@ -532,9 +505,7 @@ async def test_list_messages(self, **kwargs): print("Created message, message ID", message2.id) messages2 = await client.agents.list_messages(thread_id=thread.id) assert messages2.data.__len__() == 2 - assert ( - messages2.data[0].id == message2.id or messages2.data[1].id == message2.id - ) + assert messages2.data[0].id == message2.id or messages2.data[1].id == message2.id message3 = await 
client.agents.create_message( thread_id=thread.id, role="user", content="Hello, tell me a third joke" @@ -563,9 +534,7 @@ async def test_get_message(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -575,16 +544,12 @@ async def test_get_message(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = await client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) # get message - message2 = await client.agents.get_message( - thread_id=thread.id, message_id=message.id - ) + message2 = await client.agents.get_message(thread_id=thread.id, message_id=message.id) assert message2.id assert message.id == message2.id print("Got message, message ID", message.id) @@ -644,9 +609,7 @@ async def test_create_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -674,9 +637,7 @@ async def test_get_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -711,9 +672,7 @@ async def test_run_status(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -723,9 +682,7 @@ async def test_run_status(self, **kwargs): print("Created thread, thread ID", thread.id) # create message - message = await client.agents.create_message( - thread_id=thread.id, role="user", content="Hello, tell me a joke" - ) + message = await client.agents.create_message(thread_id=thread.id, role="user", content="Hello, tell me a joke") assert message.id print("Created message, message ID", message.id) @@ -886,13 +843,8 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): # check that tools are uploaded assert run.tools - assert ( - run.tools[0]["function"]["name"] - == functions.definitions[0]["function"]["name"] - ) - print( - "Tool successfully submitted:", functions.definitions[0]["function"]["name"] - ) + assert run.tools[0]["function"]["name"] == functions.definitions[0]["function"]["name"] + print("Tool successfully submitted:", functions.definitions[0]["function"]["name"]) # check status assert run.status in [ @@ -910,10 +862,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): run = await 
client.agents.get_run(thread_id=thread.id, run_id=run.id) # check if tools are needed - if ( - run.status == "requires_action" - and run.required_action.submit_tool_outputs - ): + if run.status == "requires_action" and run.required_action.submit_tool_outputs: print("Requires action: submit tool outputs") tool_calls = run.required_action.submit_tool_outputs.tool_calls if not tool_calls: @@ -924,9 +873,7 @@ async def test_submit_tool_outputs_to_run(self, **kwargs): break # submit tool outputs to run - tool_outputs = toolset.execute_tool_calls( - tool_calls - ) # TODO issue somewhere here + tool_outputs = toolset.execute_tool_calls(tool_calls) # TODO issue somewhere here print("Tool outputs:", tool_outputs) if tool_outputs: await client.agents.submit_tool_outputs_to_run( @@ -1008,9 +955,7 @@ async def test_create_thread_and_run(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1062,9 +1007,7 @@ async def test_list_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1102,9 +1045,7 @@ async def test_list_run_step(self, **kwargs): "completed", ] print("Run status:", run.status) - steps = await client.agents.list_run_steps( - thread_id=thread.id, run_id=run.id - ) + steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 # TODO what else should we look at? 
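The requires_action loop in test_submit_tool_outputs_to_run is the manual version of what create_and_process_run does internally. A synchronous rendering of that loop as a helper; the async test above has the same shape with awaits, and the one-second poll interval is an arbitrary choice:

```python
import time

from azure.ai.projects.models import ToolSet


def drive_run(client, thread_id: str, assistant_id: str, toolset: ToolSet):
    """Sketch of the manual polling loop the tests exercise."""
    run = client.agents.create_run(thread_id=thread_id, assistant_id=assistant_id)
    while run.status in ["queued", "in_progress", "requires_action"]:
        time.sleep(1)  # the recorded tests also sleep between polls
        run = client.agents.get_run(thread_id=thread_id, run_id=run.id)

        if run.status == "requires_action" and run.required_action.submit_tool_outputs:
            tool_calls = run.required_action.submit_tool_outputs.tool_calls
            if not tool_calls:
                # Nothing executable was requested; cancel rather than spin forever.
                client.agents.cancel_run(thread_id=thread_id, run_id=run.id)
                break
            tool_outputs = toolset.execute_tool_calls(tool_calls)
            if tool_outputs:
                client.agents.submit_tool_outputs_to_run(
                    thread_id=thread_id, run_id=run.id, tool_outputs=tool_outputs
                )
    return run
```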
assert run.status == "completed" @@ -1125,9 +1066,7 @@ async def test_get_run_step(self, **kwargs): assert isinstance(client, AIProjectClient) # create agent - agent = await client.agents.create_agent( - model="gpt-4o", name="my-agent", instructions="You are helpful agent" - ) + agent = await client.agents.create_agent(model="gpt-4o", name="my-agent", instructions="You are helpful agent") assert agent.id print("Created agent, agent ID", agent.id) @@ -1175,9 +1114,7 @@ async def test_get_run_step(self, **kwargs): steps = await client.agents.list_run_steps(thread_id=thread.id, run_id=run.id) assert steps["data"].__len__() > 0 step = steps["data"][0] - get_step = await client.agents.get_run_step( - thread_id=thread.id, run_id=run.id, step_id=step.id - ) + get_step = await client.agents.get_run_step(thread_id=thread.id, run_id=run.id, step_id=step.id) assert step == get_step # delete agent and close client @@ -1195,9 +1132,7 @@ async def test_create_vector_store_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_vector_store_file_id(self, **kwargs): """Test the agent with vector store creation.""" - await self._do_test_create_vector_store( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_vector_store(file_path=self._get_data_file(), **kwargs) async def _do_test_create_vector_store(self, **kwargs): """Test the agent with vector store creation.""" @@ -1226,9 +1161,7 @@ async def _do_test_create_vector_store(self, **kwargs): @recorded_by_proxy_async async def test_create_vector_store_add_file_file_id(self, **kwargs): """Test adding single file to vector store with file ID.""" - await self._do_test_create_vector_store_add_file( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_vector_store_add_file(file_path=self._get_data_file(), **kwargs) @agentClientPreparer() @recorded_by_proxy_async @@ -1252,9 +1185,7 @@ async def _do_test_create_vector_store_add_file(self, **kwargs): asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.agents.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id vector_store_file = await ai_client.agents.create_vector_store_file( vector_store_id=vector_store.id, data_sources=ds, file_id=file_id ) @@ -1266,9 +1197,7 @@ @recorded_by_proxy_async async def test_create_vector_store_batch_file_ids(self, **kwargs): """Test adding multiple files to vector store with file IDs.""" - await self._do_test_create_vector_store_batch( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_vector_store_batch(file_path=self._get_data_file(), **kwargs) @agentClientPreparer() @recorded_by_proxy_async @@ -1294,21 +1223,15 @@ async def _do_test_create_vector_store_batch(self, **kwargs): asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) ] - vector_store = await ai_client.agents.create_vector_store_and_poll( - file_ids=[], name="sample_vector_store" - ) + vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store") assert vector_store.id - vector_store_file_batch = ( - await ai_client.agents.create_vector_store_file_batch_and_poll( - vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids - ) + vector_store_file_batch = await ai_client.agents.create_vector_store_file_batch_and_poll( +
vector_store_id=vector_store.id, data_sources=ds, file_ids=file_ids ) assert vector_store_file_batch.id await self._test_file_search(ai_client, vector_store, file_id) - async def _test_file_search( - self, ai_client: AIProjectClient, vector_store: VectorStore, file_id: str - ) -> None: + async def _test_file_search(self, ai_client: AIProjectClient, vector_store: VectorStore, file_id: str) -> None: """Test the file search""" file_search = FileSearchTool(vector_store_ids=[vector_store.id]) agent = await ai_client.agents.create_agent( @@ -1327,9 +1250,7 @@ async def _test_file_search( ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) await ai_client.agents.delete_vector_store(vector_store.id) assert run.status == "completed" messages = await ai_client.agents.list_messages(thread_id=thread.id) @@ -1355,9 +1276,7 @@ async def test_message_attachement_azure(self, **kwargs): @recorded_by_proxy_async async def test_message_attachement_file_ids(self, **kwargs): """Test message attachment with file ID.""" - await self._do_test_message_attachment( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_message_attachment(file_path=self._get_data_file(), **kwargs) async def _do_test_message_attachment(self, **kwargs): """Test agent with the message attachment.""" @@ -1395,9 +1314,7 @@ async def _do_test_message_attachment(self, **kwargs): ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) await ai_client.agents.delete_agent(agent.id) @@ -1437,9 +1354,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert agent.id - thread = await ai_client.agents.create_thread( - tool_resources=ToolResources(file_search=fs) - ) + thread = await ai_client.agents.create_thread(tool_resources=ToolResources(file_search=fs)) assert thread.id # create message message = await ai_client.agents.create_message( @@ -1447,9 +1362,7 @@ async def test_vector_store_threads_file_search_azure(self, **kwargs): ) assert message.id, "The message was not created." 
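_test_file_search is shared by the file-ID and batch paths above. End to end, with an async AIProjectClient (from azure.ai.projects.aio, an assumption here) the flow is roughly as follows; the tools/tool_resources expansion on FileSearchTool is also an assumption, since only the vector_store_ids constructor appears in these hunks:

```python
from azure.ai.projects.aio import AIProjectClient  # async client, assumed
from azure.ai.projects.models import FilePurpose, FileSearchTool


async def run_file_search(ai_client: AIProjectClient) -> None:
    file = await ai_client.agents.upload_file_and_poll(
        file_path="./test_data/product_info_1.md", purpose=FilePurpose.AGENTS
    )
    vector_store = await ai_client.agents.create_vector_store_and_poll(
        file_ids=[file.id], name="sample_vector_store"
    )

    # Expose the store to the agent through the file search tool.
    file_search = FileSearchTool(vector_store_ids=[vector_store.id])
    agent = await ai_client.agents.create_agent(
        model="gpt-4o",
        name="my-agent",
        instructions="You are helpful agent",
        tools=file_search.definitions,
        tool_resources=file_search.resources,
    )

    thread = await ai_client.agents.create_thread()
    await ai_client.agents.create_message(
        thread_id=thread.id, role="user", content="What does the product info file say?"
    )
    run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)

    await ai_client.agents.delete_vector_store(vector_store.id)
    assert run.status == "completed", f"Error in run: {run.last_error}"
    await ai_client.agents.delete_agent(agent.id)
```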
- run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1465,17 +1378,13 @@ async def test_create_assistant_with_interpreter_azure(self, **kwargs): asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - await self._do_test_create_assistant_with_interpreter( - data_sources=[ds], **kwargs - ) + await self._do_test_create_assistant_with_interpreter(data_sources=[ds], **kwargs) @agentClientPreparer() @recorded_by_proxy_async async def test_create_assistant_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - await self._do_test_create_assistant_with_interpreter( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_assistant_with_interpreter(file_path=self._get_data_file(), **kwargs) async def _do_test_create_assistant_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1515,9 +1424,7 @@ async def _do_test_create_assistant_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1540,9 +1447,7 @@ async def test_create_thread_with_interpreter_azure(self, **kwargs): @recorded_by_proxy_async async def test_create_thread_with_interpreter_file_ids(self, **kwargs): """Test Create assistant with code interpreter with file IDs.""" - await self._do_test_create_thread_with_interpreter( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_thread_with_interpreter(file_path=self._get_data_file(), **kwargs) async def _do_test_create_thread_with_interpreter(self, **kwargs): """Test create assistant with code interpreter and project asset id""" @@ -1581,9 +1486,7 @@ async def _do_test_create_thread_with_interpreter(self, **kwargs): ) assert message.id, "The message was not created." - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.id, "The run was not created." await self._remove_file_maybe(file_id, ai_client) assert run.status == "completed", f"Error in run: {run.last_error}" @@ -1631,9 +1534,7 @@ async def test_create_assistant_with_inline_vs_azure(self, **kwargs): ) assert message.id, "The message was not created." 
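The *_azure variants replace the uploaded file with a project data asset. Their shared setup, sketched with a placeholder asset URI (the tests read the real value from azure_ai_projects_data_path); this runs inside an async function like the previous sketch:

```python
from azure.ai.projects.models import VectorStoreDataSource, VectorStoreDataSourceAssetType

# Reference an existing project data asset; the URI below is a placeholder.
ds = [
    VectorStoreDataSource(
        asset_identifier="azureml://datastores/.../paths/product_info_1.md",
        asset_type=VectorStoreDataSourceAssetType.URI_ASSET,
    )
]
vector_store = await ai_client.agents.create_vector_store_and_poll(file_ids=[], name="sample_vector_store")
vector_store_file = await ai_client.agents.create_vector_store_file(
    vector_store_id=vector_store.id, data_sources=ds
)
assert vector_store_file.id
```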
- run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1649,17 +1550,13 @@ async def test_create_attachment_in_thread_azure(self, **kwargs): asset_identifier=kwargs["azure_ai_projects_data_path"], asset_type=VectorStoreDataSourceAssetType.URI_ASSET, ) - await self._do_test_create_attachment_in_thread_azure( - data_sources=[ds], **kwargs - ) + await self._do_test_create_attachment_in_thread_azure(data_sources=[ds], **kwargs) @agentClientPreparer() @recorded_by_proxy_async async def test_create_attachment_in_thread_file_ids(self, **kwargs): """Create thread with message attachment inline with azure asset IDs.""" - await self._do_test_create_attachment_in_thread_azure( - file_path=self._get_data_file(), **kwargs - ) + await self._do_test_create_attachment_in_thread_azure(file_path=self._get_data_file(), **kwargs) async def _do_test_create_attachment_in_thread_azure(self, **kwargs): # create client @@ -1694,9 +1591,7 @@ async def _do_test_create_attachment_in_thread_azure(self, **kwargs): thread = await ai_client.agents.create_thread(messages=[message]) assert thread.id - run = await ai_client.agents.create_and_process_run( - thread_id=thread.id, assistant_id=agent.id - ) + run = await ai_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) assert run.status == "completed", f"Error in run: {run.last_error}" messages = await ai_client.agents.list_messages(thread.id) assert len(messages) @@ -1713,9 +1608,7 @@ async def _get_file_id_maybe(self, ai_client: AIProjectClient, **kwargs) -> str: return file.id return None - async def _remove_file_maybe( - self, file_id: str, ai_client: AIProjectClient - ) -> None: + async def _remove_file_maybe(self, file_id: str, ai_client: AIProjectClient) -> None: """Remove file if we have file ID.""" if file_id: await ai_client.agents.delete_file(file_id) From fb046e25975681d25915dd354040b1b4f6794e83 Mon Sep 17 00:00:00 2001 From: M-Hietala <78813398+M-Hietala@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:20:40 -0600 Subject: [PATCH 117/138] agents tracing pylint fixes (#38496) Co-authored-by: Marko Hietala --- .../agents/_ai_agents_instrumentor.py | 123 +++++++++++------- 1 file changed, 77 insertions(+), 46 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 8018ad00fd33..075ee21f6299 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -63,7 +63,7 @@ # pylint: disable = no-name-in-module from opentelemetry.trace import Span, StatusCode - from azure.core.tracing import AbstractSpan # type: ignore + from azure.core.tracing import AbstractSpan # pylint: disable=C0412 # type: ignore _tracing_library_available = True except ModuleNotFoundError: @@ -154,6 +154,7 @@ def is_content_recording_enabled(self) -> bool: class _AIAgentsInstrumentorPreview: + # pylint: disable=R0904 """ A class for managing the trace instrumentation of AI Agents. 
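The commit below only quiets pylint inside the instrumentor; its public surface is unchanged. For orientation, typical usage looks roughly like this, with the import path inferred from the file's location in the diff header and uninstrument assumed as the counterpart of instrument:

```python
from azure.ai.projects.telemetry.agents import AIAgentsInstrumentor

instrumentor = AIAgentsInstrumentor()
if not instrumentor.is_instrumented():
    instrumentor.instrument()  # patch agent operations so they emit spans

# Message contents only land on spans when content recording is enabled.
print("content recording:", instrumentor.is_content_recording_enabled())

instrumentor.uninstrument()  # assumed counterpart to instrument()
```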
@@ -455,7 +456,7 @@ def start_thread_run_span( additional_messages: Optional[List[ThreadMessage]] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, - tools: Optional[List[ToolDefinition]] = None, + _tools: Optional[List[ToolDefinition]] = None, max_prompt_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -517,9 +518,9 @@ def start_create_agent_span( name: Optional[str] = None, description: Optional[str] = None, instructions: Optional[str] = None, - tools: Optional[List[ToolDefinition]] = None, - tool_resources: Optional[ToolResources] = None, - toolset: Optional[ToolSet] = None, + _tools: Optional[List[ToolDefinition]] = None, + _tool_resources: Optional[ToolResources] = None, + _toolset: Optional[ToolSet] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, response_format: Optional["_types.AgentsApiResponseFormatOption"] = None, @@ -546,7 +547,7 @@ def start_create_thread_span( self, project_name: str, messages: Optional[List[ThreadMessage]] = None, - tool_resources: Optional[ToolResources] = None, + _tool_resources: Optional[ToolResources] = None, ) -> "Optional[AbstractSpan]": span = start_span(OperationName.CREATE_THREAD, project_name) if span and span.span_instance.is_recording: @@ -559,7 +560,9 @@ def start_list_messages_span(self, project_name: str, thread_id: Optional[str] = return start_span(OperationName.LIST_MESSAGES, project_name, thread_id=thread_id) def trace_create_agent(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name name = kwargs.get("name") model = kwargs.get("model") description = kwargs.get("description") @@ -577,15 +580,15 @@ def trace_create_agent(self, function, *args, **kwargs): model=model, description=description, instructions=instructions, - tools=tools, - tool_resources=tool_resources, - toolset=toolset, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, temperature=temperature, top_p=top_p, response_format=response_format, ) - if span == None: + if span is None: return function(*args, **kwargs) with span: @@ -608,7 +611,9 @@ def trace_create_agent(self, function, *args, **kwargs): return result async def trace_create_agent_async(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name name = kwargs.get("name") model = kwargs.get("model") description = kwargs.get("description") @@ -626,15 +631,15 @@ async def trace_create_agent_async(self, function, *args, **kwargs): model=model, description=description, instructions=instructions, - tools=tools, - tool_resources=tool_resources, - toolset=toolset, + _tools=tools, + _tool_resources=tool_resources, + _toolset=toolset, temperature=temperature, top_p=top_p, response_format=response_format, ) - if span == None: + if span is None: return await function(*args, **kwargs) with span: @@ -657,12 +662,14 @@ async def trace_create_agent_async(self, function, *args, **kwargs): return result def trace_create_thread(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name messages = 
kwargs.get("messages") span = self.start_create_thread_span(project_name=project_name, messages=messages) - if span == None: + if span is None: return function(*args, **kwargs) with span: @@ -685,12 +692,14 @@ def trace_create_thread(self, function, *args, **kwargs): return result async def trace_create_thread_async(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name messages = kwargs.get("messages") span = self.start_create_thread_span(project_name=project_name, messages=messages) - if span == None: + if span is None: return await function(*args, **kwargs) with span: @@ -713,7 +722,9 @@ async def trace_create_thread_async(self, function, *args, **kwargs): return result def trace_create_message(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") role = kwargs.get("role") content = kwargs.get("content") @@ -723,7 +734,7 @@ def trace_create_message(self, function, *args, **kwargs): project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments ) - if span == None: + if span is None: return function(*args, **kwargs) with span: @@ -746,7 +757,9 @@ def trace_create_message(self, function, *args, **kwargs): return result async def trace_create_message_async(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") role = kwargs.get("role") content = kwargs.get("content") @@ -756,7 +769,7 @@ async def trace_create_message_async(self, function, *args, **kwargs): project_name=project_name, thread_id=thread_id, content=content, role=role, attachments=attachments ) - if span == None: + if span is None: return await function(*args, **kwargs) with span: @@ -779,7 +792,9 @@ async def trace_create_message_async(self, function, *args, **kwargs): return result def trace_create_run(self, operation_name, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") assistant_id = kwargs.get("assistant_id") model = kwargs.get("model") @@ -803,14 +818,14 @@ def trace_create_run(self, operation_name, function, *args, **kwargs): additional_instructions=additional_instructions, additional_messages=additional_messages, temperature=temperature, - tools=tools, + _tools=tools, top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, response_format=response_format, ) - if span == None: + if span is None: return function(*args, **kwargs) with span: @@ -833,7 +848,9 @@ def trace_create_run(self, operation_name, function, *args, **kwargs): return result async def trace_create_run_async(self, operation_name, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") assistant_id = kwargs.get("assistant_id") model = kwargs.get("model") @@ -857,14 
+874,14 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs additional_instructions=additional_instructions, additional_messages=additional_messages, temperature=temperature, - tools=tools, + _tools=tools, top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, response_format=response_format, ) - if span == None: + if span is None: return await function(*args, **kwargs) with span: @@ -893,7 +910,9 @@ async def trace_create_run_async(self, operation_name, function, *args, **kwargs return result def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") run_id = kwargs.get("run_id") tool_outputs = kwargs.get("tool_outputs") @@ -907,7 +926,7 @@ def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): event_handler=event_handler, ) - if span == None: + if span is None: return function(*args, **kwargs) with span: @@ -934,7 +953,9 @@ def trace_submit_tool_outputs(self, stream, function, *args, **kwargs): return result async def trace_submit_tool_outputs_async(self, stream, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") run_id = kwargs.get("run_id") tool_outputs = kwargs.get("tool_outputs") @@ -1030,7 +1051,9 @@ async def trace_handle_submit_tool_outputs_async(self, function, *args, **kwargs def trace_create_stream(self, function, *args, **kwargs): operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") assistant_id = kwargs.get("assistant_id") model = kwargs.get("model") @@ -1055,7 +1078,7 @@ def trace_create_stream(self, function, *args, **kwargs): additional_instructions=additional_instructions, additional_messages=additional_messages, temperature=temperature, - tools=tools, + _tools=tools, top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, @@ -1088,7 +1111,9 @@ def trace_create_stream(self, function, *args, **kwargs): async def trace_create_stream_async(self, function, *args, **kwargs): operation_name = OperationName.PROCESS_THREAD_RUN - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") assistant_id = kwargs.get("assistant_id") model = kwargs.get("model") @@ -1113,7 +1138,7 @@ async def trace_create_stream_async(self, function, *args, **kwargs): additional_instructions=additional_instructions, additional_messages=additional_messages, temperature=temperature, - tools=tools, + _tools=tools, top_p=top_p, max_prompt_tokens=max_prompt_tokens, max_completion_tokens=max_completion_tokens, @@ -1145,7 +1170,9 @@ async def trace_create_stream_async(self, function, *args, **kwargs): return result def trace_list_messages(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore 
[reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) @@ -1175,7 +1202,9 @@ def trace_list_messages(self, function, *args, **kwargs): return result async def trace_list_messages_async(self, function, *args, **kwargs): - project_name = args[0]._config.project_name + project_name = args[ # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + 0 + ]._config.project_name thread_id = kwargs.get("thread_id") span = self.start_list_messages_span(project_name=project_name, thread_id=thread_id) @@ -1204,7 +1233,7 @@ async def trace_list_messages_async(self, function, *args, **kwargs): return result - def handle_run_stream_exit(self, function, *args, **kwargs): + def handle_run_stream_exit(self, _function, *args, **kwargs): agent_run_stream = args[0] exc_type = kwargs.get("exc_type") exc_val = kwargs.get("exc_val") @@ -1267,9 +1296,9 @@ def _trace_sync_function( """ @functools.wraps(function) - def inner(*args, **kwargs): + def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() + span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 if span_impl_type is None: return function(*args, **kwargs) @@ -1327,9 +1356,9 @@ def _trace_async_function( """ @functools.wraps(function) - async def inner(*args, **kwargs): + async def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() + span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 if span_impl_type is None: return function(*args, **kwargs) @@ -1563,7 +1592,7 @@ def _set_enable_content_recording(self, enable_content_recording: bool = False) parameter names and parameter values are traced. :type enable_content_recording: bool """ - global _trace_agents_content + global _trace_agents_content # pylint: disable=W0603 _trace_agents_content = enable_content_recording def _is_content_recording_enabled(self) -> bool: @@ -1615,7 +1644,9 @@ def on_run_step(self, step: "RunStep") -> None: # todo - report errors for failure statuses here and in run ? 
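_trace_sync_function and _trace_async_function (carrying the R0911 and E1102 suppressions above) share one shape: bail out cheaply when no tracing plugin is configured. A stripped-down sketch of that wrapper; the real versions also map arguments onto span attributes and record errors:

```python
import functools

from azure.core.settings import settings


def traced(function):
    """Minimal sketch of the wrapper shape used by _trace_sync_function."""

    @functools.wraps(function)
    def inner(*args, **kwargs):
        span_impl_type = settings.tracing_implementation()
        if span_impl_type is None:
            # No tracing plugin configured: call through with zero overhead.
            return function(*args, **kwargs)
        with span_impl_type(name=function.__name__):
            return function(*args, **kwargs)

    return inner
```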
if step.type == "tool_calls" and isinstance(step.step_details, RunStepToolCallDetails): - self.instrumentor._add_tool_assistant_message_event(self.span, step) + self.instrumentor._add_tool_assistant_message_event( # pylint: disable=protected-access # pyright: ignore [reportFunctionMemberAccess] + self.span, step ) elif step.type == "message_creation" and step.status == RunStepStatus.COMPLETED: self.instrumentor.add_thread_message_event(self.span, cast(ThreadMessage, self.last_message), step.usage) self.last_message = None From 723c59bcac0d6b94fd7e9e4be87b3087052b8498 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:45:37 -0800 Subject: [PATCH 118/138] Remove premature adding of assets.json file --- sdk/ai/azure-ai-projects/assets.json | Bin 318 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 sdk/ai/azure-ai-projects/assets.json diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json deleted file mode 100644 index f1dcbdd8a39e11c48cb03a3ee2c4bf638c163b35..0000000000000000000000000000000000000000 From b0ec1edded6d76c68769af5a66ada04df5205a8d Mon Sep 17 00:00:00 2001 From: kdestin <101366538+kdestin@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:12:09 -0500 Subject: [PATCH 119/138] [ai] fix(pylint): Resolving remaining pylint errors (#38500) * fix(pylint): Resolve R1705(no-else-return) * fix(pylint): Resolve C0207(use-maxsplit-arg) * fix(pylint): Resolve C0412(ungrouped-imports) * fix(pylint): Resolve W1401(anomalous-backslash-in-string) * fix(pylint): Resolve W0105(pointless-string-statement) * fix(pylint): Ignore W0221(arguments-differ) pylint has an open issue tracking false positives for arguments-differ with overloads in subclasses * fix(pylint): Ignore W0718(broad-exception-caught) * fix(pylint): Ignore E0401(import-error) and E0611(no-name-in-module) * fix(pylint): Ignore C4748(client-accepts-api-version-keyword) Currently, neither client class supports overriding the api version * fix(pylint): Ignore W0212(protected-access) * fix(pylint): resolve W0707(raise-missing-from) * fix(pylint): Ignore E1102(not-callable) pylint appears to fail to correctly infer that settings.tracing_implementation actually is callable * fix(pylint): Ignore W0231(super-init-not-called) * fix(pylint): Ignore W0613(unused-argument) * fix(pylint): Ignore W0236(invalid-overridden-method) * fix(pylint): Ignore R0914(too-many-locals) * fix(pylint): Ignore R0902(too-many-instance-attributes) * fix(pylint): Ignore R0915(too-many-statements) * fix(pylint): Ignore R0911(too-many-return-statements) --- .../azure/ai/projects/_patch.py | 5 +- .../azure/ai/projects/aio/_patch.py | 5 +- .../ai/projects/aio/operations/_operations.py | 6 +-- .../ai/projects/aio/operations/_patch.py | 20 ++++---- .../azure/ai/projects/models/_models.py | 24 +++++----- .../azure/ai/projects/models/_patch.py | 27 ++++------- .../ai/projects/operations/_operations.py | 6 +-- .../azure/ai/projects/operations/_patch.py | 46 +++++++++---------- .../agents/_ai_agents_instrumentor.py | 7 +-- .../ai/projects/telemetry/agents/_utils.py | 2 +- 10 files changed, 64 insertions(+), 84 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index
42231b5deca4..5aa6b4569aae 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -24,9 +24,8 @@ from .operations._patch import InferenceOperations -class AIProjectClient(ClientGenerated): - - def __init__( +class AIProjectClient(ClientGenerated): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes + def __init__( # pylint: disable=super-init-not-called,too-many-statements self, endpoint: str, subscription_id: str, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index 7b163a8c6c5a..d2c480ac123f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -30,9 +30,8 @@ from azure.core.credentials_async import AsyncTokenCredential -class AIProjectClient(ClientGenerated): - - def __init__( +class AIProjectClient(ClientGenerated): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes + def __init__( # pylint: disable=super-init-not-called,too-many-statements self, endpoint: str, subscription_id: str, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py index ec7576832710..aadb1c8245a1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_operations.py @@ -613,7 +613,7 @@ async def update_agent( """ @distributed_trace_async - async def update_agent( + async def update_agent( # pylint: disable=too-many-locals self, assistant_id: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -1952,7 +1952,7 @@ async def create_run( """ @distributed_trace_async - async def create_run( + async def create_run( # pylint: disable=too-many-locals self, thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -2841,7 +2841,7 @@ async def create_thread_and_run( """ @distributed_trace_async - async def create_thread_and_run( + async def create_thread_and_run( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 86144a36e20a..541b51e7cdcf 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -398,7 +398,7 @@ async def get_connection_string(self) -> str: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace() + get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace() # pylint: disable=protected-access if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") @@ -463,7 +463,7 @@ async def create_agent(self, body: JSON, *, content_type: str = "application/jso """ @overload - async def create_agent( + async def create_agent( # pylint: disable=arguments-differ self, *, model: str, @@ -526,7 +526,7 @@ async def create_agent( """ @overload - async def create_agent( + async def create_agent( # pylint: disable=arguments-differ self, *, model: 
str, @@ -696,7 +696,7 @@ async def update_agent( """ @overload - async def update_agent( + async def update_agent( # pylint: disable=arguments-differ self, assistant_id: str, *, @@ -764,7 +764,7 @@ async def update_agent( """ @overload - async def update_agent( + async def update_agent( # pylint: disable=arguments-differ self, assistant_id: str, *, @@ -990,7 +990,7 @@ async def create_run( """ @overload - async def create_run( + async def create_run( # pylint: disable=arguments-differ self, thread_id: str, *, @@ -1657,7 +1657,7 @@ async def submit_tool_outputs_to_run( """ @overload - async def submit_tool_outputs_to_run( + async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ self, thread_id: str, run_id: str, @@ -1718,7 +1718,7 @@ async def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: Optional[_models.AsyncAgentEventHandler] = None, + event_handler: Optional[_models.AsyncAgentEventHandler] = None, # pylint: disable=unused-argument **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool @@ -1918,7 +1918,7 @@ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """ @overload - async def upload_file( + async def upload_file( # pylint: disable=arguments-differ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -1936,7 +1936,7 @@ async def upload_file( """ @overload - async def upload_file( + async def upload_file( # pylint: disable=arguments-differ self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index e1150d4e56aa..5e76e0658f77 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -230,7 +230,7 @@ class AgentsNamedToolChoice(_model_base.Model): """ type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() - """the type of tool. If type is ``function``\ , the function name must be set. Required. Known + """the type of tool. If type is ``function``, the function name must be set. Required. Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", \"microsoft_fabric\", \"sharepoint_grounding\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() @@ -3736,7 +3736,7 @@ class RunStepDeltaCodeInterpreterDetailItemObject(_model_base.Model): # pylint: """The input into the Code Interpreter tool call.""" outputs: Optional[List["_models.RunStepDeltaCodeInterpreterOutput"]] = rest_field() """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more - items, including text (\ ``logs``\ ) or images (\ ``image``\ ). Each of these are represented + items, including text (``logs``) or images (``image``). Each of these are represented by a different object type.""" @@ -4930,9 +4930,9 @@ class ThreadMessageOptions(_model_base.Model): """The role of the entity that is creating the message. 
Allowed values include: - * ``user``\ : Indicates the message is sent by an actual user and should be used in most + * ``user``: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. - * ``assistant``\ : Indicates the message is generated by the agent. Use this value to insert + * ``assistant``: Indicates the message is generated by the agent. Use this value to insert messages from the agent into the conversation. Required. Known values are: \"user\" and \"assistant\".""" content: str = rest_field() @@ -5092,7 +5092,7 @@ class ThreadRun(_model_base.Model): Known values are: \"max_completion_tokens\" and \"max_prompt_tokens\".""" usage: "_models.RunCompletionUsage" = rest_field() """Usage statistics related to the run. This value will be ``null`` if the run is not in a - terminal state (i.e. ``in_progress``\ , ``queued``\ , etc.). Required.""" + terminal state (i.e. ``in_progress``, ``queued``, etc.). Required.""" temperature: Optional[float] = rest_field() """The sampling temperature used for this run. If not set, defaults to 1.""" top_p: Optional[float] = rest_field() @@ -5328,9 +5328,9 @@ class TruncationObject(_model_base.Model): type: Union[str, "_models.TruncationStrategy"] = rest_field() """The truncation strategy to use for the thread. The default is ``auto``. If set to - ``last_messages``\ , the thread will + ``last_messages``, the thread will be truncated to the ``lastMessages`` count most recent messages in the thread. When set to - ``auto``\ , messages in the middle of the thread + ``auto``, messages in the middle of the thread will be dropped to fit the context length of the model, ``max_prompt_tokens``. Required. Known values are: \"auto\" and \"last_messages\".""" last_messages: Optional[int] = rest_field() @@ -5512,7 +5512,7 @@ class VectorStore(_model_base.Model): file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. Required.""" status: Union[str, "_models.VectorStoreStatus"] = rest_field() - """The status of the vector store, which can be either ``expired``\ , ``in_progress``\ , or + """The status of the vector store, which can be either ``expired``, ``in_progress``, or ``completed``. A status of ``completed`` indicates that the vector store is ready for use. Required. Known values are: \"expired\", \"in_progress\", and \"completed\".""" expires_after: Optional["_models.VectorStoreExpirationPolicy"] = rest_field() @@ -5903,8 +5903,8 @@ class VectorStoreFile(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. Required.""" status: Union[str, "_models.VectorStoreFileStatus"] = rest_field() - """The status of the vector store file, which can be either ``in_progress``\ , ``completed``\ , - ``cancelled``\ , or ``failed``. The status ``completed`` indicates that the vector store file + """The status of the vector store file, which can be either ``in_progress``, ``completed``, + ``cancelled``, or ``failed``. The status ``completed`` indicates that the vector store file is ready for use. Required. Known values are: \"in_progress\", \"completed\", \"failed\", and \"cancelled\".""" last_error: "_models.VectorStoreFileError" = rest_field() @@ -5973,8 +5973,8 @@ class VectorStoreFileBatch(_model_base.Model): vector_store_id: str = rest_field() """The ID of the vector store that the file is attached to. 
Required.""" status: Union[str, "_models.VectorStoreFileBatchStatus"] = rest_field() - """The status of the vector store files batch, which can be either ``in_progress``\ , - ``completed``\ , ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", + """The status of the vector store files batch, which can be either ``in_progress``, + ``completed``, ``cancelled`` or ``failed``. Required. Known values are: \"in_progress\", \"completed\", \"cancelled\", and \"failed\".""" file_counts: "_models.VectorStoreFileCount" = rest_field() """Files count grouped by status processed or being processed by this vector store. Required.""" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index d44308205a56..6dd737fcd3c1 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -245,7 +245,7 @@ def _refresh_token(self) -> None: self._sas_token = "" if connection is not None and connection.token_credential is not None: sas_credential = cast(SASTokenCredential, connection.token_credential) - self._sas_token = sas_credential._sas_token + self._sas_token = sas_credential._sas_token # pylint: disable=protected-access self._expires_on = SASTokenCredential._get_expiration_date_from_token(self._sas_token) logger.debug("[SASTokenCredential._refresh_token] Exit. New token expires on %s.", self._expires_on) @@ -288,7 +288,7 @@ def get_token( } -def _map_type(annotation) -> Dict[str, Any]: +def _map_type(annotation) -> Dict[str, Any]: # pylint: disable=too-many-return-statements if annotation == inspect.Parameter.empty: return {"type": "string"} # Default type if annotation is missing @@ -395,7 +395,7 @@ def _build_function_definitions(self, functions: Dict[str, Any]) -> List[Functio sig = inspect.signature(func) params = sig.parameters docstring = inspect.getdoc(func) or "" - description = docstring.split("\n")[0] if docstring else "No description" + description = docstring.split("\n", maxsplit=1)[0] if docstring else "No description" param_descriptions = {} for line in docstring.splitlines(): @@ -490,7 +490,7 @@ def execute(self, tool_call: RequiredFunctionToolCall) -> Any: class AsyncFunctionTool(BaseFunctionTool): - async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: + async def execute(self, tool_call: RequiredFunctionToolCall) -> Any: # pylint: disable=invalid-overridden-method function, parsed_arguments = self._get_func_and_args(tool_call) try: @@ -602,17 +602,6 @@ def definitions(self) -> List[ToolDefinition]: return [SharepointToolDefinition(sharepoint_grounding=ToolConnectionList(connection_list=self.connection_ids))] -""" - def updateConnections(self, connection_list: List[Tuple[str, str]]) -> None: -# use connection_list to auto-update connections for bing search tool if no pre-existing - if self.connection_ids.__len__() == 0: - for id, connection_type in connection_list: - if connection_type == "ApiKey": - self.connection_ids.append(id) - return -""" - - class FileSearchTool(Tool): """ A tool that searches for uploaded file information from the created vector stores. 
@@ -874,7 +863,7 @@ def execute_tool_calls(self, tool_calls: List[Any]) -> Any: "output": output, } tool_outputs.append(tool_output) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logging.error("Failed to execute tool call %s: %s", tool_call, e) return tool_outputs @@ -918,7 +907,7 @@ async def execute_tool_calls(self, tool_calls: List[Any]) -> Any: "output": output, } tool_outputs.append(tool_output) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logging.error("Failed to execute tool call %s: %s", tool_call, e) return tool_outputs @@ -1157,7 +1146,7 @@ async def _process_event(self, event_data_str: str) -> Tuple[str, Union[str, Str self.done = True # Mark the stream as done else: await self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logging.error("Error in event handler for event '%s': %s", event_type, e) return event_type, event_data_obj @@ -1306,7 +1295,7 @@ def _process_event(self, event_data_str: str) -> Tuple[str, Union[str, StreamEve self.done = True # Mark the stream as done else: self.event_handler.on_unhandled_event(event_type, event_data_obj) - except Exception as e: + except Exception as e: # pylint: disable=broad-exception-caught logging.error("Error in event handler for event '%s': %s", event_type, e) return event_type, event_data_obj diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py index 4eaf33658311..fb8be0461a78 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_operations.py @@ -2007,7 +2007,7 @@ def update_agent( """ @distributed_trace - def update_agent( + def update_agent( # pylint: disable=too-many-locals self, assistant_id: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -3346,7 +3346,7 @@ def create_run( """ @distributed_trace - def create_run( + def create_run( # pylint: disable=too-many-locals self, thread_id: str, body: Union[JSON, IO[bytes]] = _Unset, @@ -4235,7 +4235,7 @@ def create_thread_and_run( """ @distributed_trace - def create_thread_and_run( + def create_thread_and_run( # pylint: disable=too-many-locals self, body: Union[JSON, IO[bytes]] = _Unset, *, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index dd2f4b5e975c..f384a1f52768 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -395,8 +395,7 @@ def _get_trace_exporter(destination: Union[TextIO, str, None]) -> Any: ) from e return ConsoleSpanExporter() - else: - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") return None @@ -409,8 +408,8 @@ def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: # _logs are considered beta (not internal) in OpenTelemetry Python API/SDK. # So it's ok to use it for local development, but we'll swallow # any errors in case of any breaking changes on OTel side. 
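On the tracing side, _get_trace_exporter plus _configure_tracing reduce to standard OpenTelemetry SDK setup when the destination is sys.stdout; the sketch below mirrors the imports and the isinstance guard visible in these hunks:

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# sys.stdout as the destination selects the console exporter; an "http(s)://..."
# destination would select the OTLP exporter instead.
if not isinstance(trace.get_tracer_provider(), TracerProvider):
    trace.set_tracer_provider(TracerProvider())
provider = trace.get_tracer_provider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
```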
- from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore - except Exception as ex: + from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter # type: ignore # pylint: disable=import-error,no-name-in-module + except Exception as ex: # pylint: disable=broad-exception-caught # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) @@ -430,8 +429,7 @@ def _get_log_exporter(destination: Union[TextIO, str, None]) -> Any: # and just warn about them. logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) return None - else: - raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") + raise ValueError("Only `sys.stdout` is supported at the moment for type `TextIO`") return None @@ -444,10 +442,10 @@ def _configure_tracing(span_exporter: Any) -> None: from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import SimpleSpanProcessor - except ModuleNotFoundError as _: + except ModuleNotFoundError as e: raise ModuleNotFoundError( "OpenTelemetry SDK is not installed. Please install it using 'pip install opentelemetry-sdk'" - ) + ) from e # if tracing was not setup before, we need to create a new TracerProvider if not isinstance(trace.get_tracer_provider(), TracerProvider): @@ -472,9 +470,9 @@ def _configure_logging(log_exporter: Any) -> None: # So it's ok to use them for local development, but we'll swallow # any errors in case of any breaking changes on OTel side. from opentelemetry import _logs, _events - from opentelemetry.sdk._logs import LoggerProvider - from opentelemetry.sdk._events import EventLoggerProvider - from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor + from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor # pylint: disable=import-error,no-name-in-module if not isinstance(_logs.get_logger_provider(), LoggerProvider): logger_provider = LoggerProvider() @@ -486,13 +484,13 @@ def _configure_logging(log_exporter: Any) -> None: logger_provider = cast(LoggerProvider, _logs.get_logger_provider()) logger_provider.add_log_record_processor(SimpleLogRecordProcessor(log_exporter)) _events.set_event_logger_provider(EventLoggerProvider(logger_provider)) - except Exception as ex: + except Exception as ex: # pylint: disable=broad-exception-caught # since OTel logging is still in beta in Python, we're going to swallow any errors # and just warn about them. logger.warning("Failed to configure OpenTelemetry logging.", exc_info=ex) -def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: +def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: # pylint: disable=unused-argument """Enable tracing and logging to console (sys.stdout), or to an OpenTelemetry Protocol (OTLP) endpoint. 
:param destination: `sys.stdout` to print telemetry to console or a string holding the @@ -535,7 +533,7 @@ def _enable_telemetry(destination: Union[TextIO, str, None], **kwargs) -> None: agents_instrumentor = AIAgentsInstrumentor() if not agents_instrumentor.is_instrumented(): agents_instrumentor.instrument() - except Exception as exc: + except Exception as exc: # pylint: disable=broad-exception-caught logger.warning("Could not call `AIAgentsInstrumentor().instrument()`", exc_info=exc) try: @@ -577,7 +575,7 @@ def get_connection_string(self) -> str: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace() + get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace() # pylint: disable=protected-access if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") @@ -641,7 +639,7 @@ def create_agent(self, body: JSON, *, content_type: str = "application/json", ** """ @overload - def create_agent( + def create_agent( # pylint: disable=arguments-differ self, *, model: str, @@ -704,7 +702,7 @@ def create_agent( """ @overload - def create_agent( + def create_agent( # pylint: disable=arguments-differ self, *, model: str, @@ -874,7 +872,7 @@ def update_agent( """ @overload - def update_agent( + def update_agent( # pylint: disable=arguments-differ self, assistant_id: str, *, @@ -942,7 +940,7 @@ def update_agent( """ @overload - def update_agent( + def update_agent( # pylint: disable=arguments-differ self, assistant_id: str, *, @@ -1168,7 +1166,7 @@ def create_run( """ @overload - def create_run( + def create_run( # pylint: disable=arguments-differ self, thread_id: str, *, @@ -1855,7 +1853,7 @@ def submit_tool_outputs_to_run( """ @overload - def submit_tool_outputs_to_run( + def submit_tool_outputs_to_run( # pylint: disable=arguments-differ self, thread_id: str, run_id: str, @@ -1916,7 +1914,7 @@ def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: Optional[_models.AgentEventHandler] = None, + event_handler: Optional[_models.AgentEventHandler] = None, # pylint: disable=unused-argument **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool @@ -2116,7 +2114,7 @@ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: """ @overload - def upload_file( + def upload_file( # pylint: disable=arguments-differ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. @@ -2134,7 +2132,7 @@ def upload_file( """ @overload - def upload_file( + def upload_file( # pylint: disable=arguments-differ self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any ) -> _models.OpenAIFile: """Uploads a file for use by other operations. 
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py index 075ee21f6299..90c152434efd 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_ai_agents_instrumentor.py @@ -56,6 +56,7 @@ ) from azure.core import CaseInsensitiveEnumMeta # type: ignore from azure.core.settings import settings +from azure.core.tracing import AbstractSpan _Unset: Any = object() @@ -63,11 +64,8 @@ # pylint: disable = no-name-in-module from opentelemetry.trace import Span, StatusCode - from azure.core.tracing import AbstractSpan # pylint: disable=C0412 # type: ignore - _tracing_library_available = True except ModuleNotFoundError: - _tracing_library_available = False @@ -491,7 +489,6 @@ def start_submit_tool_outputs_span( tool_outputs: Optional[List[ToolOutput]] = None, event_handler: Optional[AgentEventHandler] = None, ) -> "Optional[AbstractSpan]": - run_span = event_handler.span if isinstance(event_handler, _AgentEventHandlerTraceWrapper) else None recorded = self._add_tool_message_events(run_span, tool_outputs) @@ -1297,7 +1294,6 @@ def _trace_sync_function( @functools.wraps(function) def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 if span_impl_type is None: return function(*args, **kwargs) @@ -1357,7 +1353,6 @@ def _trace_async_function( @functools.wraps(function) async def inner(*args, **kwargs): # pylint: disable=R0911 - span_impl_type = settings.tracing_implementation() # pylint: disable=E1102 if span_impl_type is None: return function(*args, **kwargs) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py index 92f12a77ca12..bdc18e1381e8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/telemetry/agents/_utils.py @@ -12,7 +12,7 @@ try: from opentelemetry.trace import StatusCode, Span # noqa: F401 # pylint: disable=unused-import - _span_impl_type = settings.tracing_implementation() + _span_impl_type = settings.tracing_implementation() # pylint: disable=not-callable except ModuleNotFoundError: _span_impl_type = None From 64fadfe06dfae4ed60882c5759b045af6238acf1 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:27:56 -0800 Subject: [PATCH 120/138] Disable all agents tests. 
Push recordings for all other tests to the assets folder (#38481)
---
 sdk/ai/azure-ai-projects/assets.json | 6 ++++++
 sdk/ai/azure-ai-projects/azure_ai_projects_tests.env | 11 +++++++----
 sdk/ai/azure-ai-projects/dev_requirements.txt | 3 +++
 ...chat_completions_with_azure_openai_client_async.py | 2 +-
 sdk/ai/azure-ai-projects/tests/conftest.py | 4 ++++
 5 files changed, 21 insertions(+), 5 deletions(-)
 create mode 100644 sdk/ai/azure-ai-projects/assets.json

diff --git a/sdk/ai/azure-ai-projects/assets.json b/sdk/ai/azure-ai-projects/assets.json
new file mode 100644
index 000000000000..e2fca54c37e0
--- /dev/null
+++ b/sdk/ai/azure-ai-projects/assets.json
@@ -0,0 +1,6 @@
+{
+  "AssetsRepo": "Azure/azure-sdk-assets",
+  "AssetsRepoPrefixPath": "python",
+  "TagPrefix": "python/ai/azure-ai-projects",
+  "Tag": "python/ai/azure-ai-projects_a5498ae251"
+}
diff --git a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
index 3280ed9d6826..53ce3e343906 100644
--- a/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
+++ b/sdk/ai/azure-ai-projects/azure_ai_projects_tests.env
@@ -1,9 +1,12 @@
 #
 # Environment variables required for running tests
 #
+# All values should be empty by default. Fill them in locally before running live tests on your dev box,
+# but do not commit these changes to the repository.
+#
 
-# Default to live tests without recordings:
-AZURE_TEST_RUN_LIVE=true
+# The default here should be to run tests from recordings:
+AZURE_TEST_RUN_LIVE=false
 AZURE_SKIP_LIVE_RECORDING=true
 PROXY_URL=http://localhost:5000
 
@@ -12,8 +15,8 @@ PROXY_URL=http://localhost:5000
 #
 # To run connection tests you need an AI Studio project with
 # - At least one AIServices resource connected
-# - At lease one Azure OpenAI resource connected
-# and you will need to define the 5 environment varabled below.
+# - At least one Azure OpenAI resource connected
+# and you will need to define the 5 environment variables below.
 # Ideally you have more than one AIServices and Azure OpenAI resources connected,
 # such that you set a connection name that is different than the default connection name.
 #
diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt
index 751bb54c16ea..0b28efcde9bc 100644
--- a/sdk/ai/azure-ai-projects/dev_requirements.txt
+++ b/sdk/ai/azure-ai-projects/dev_requirements.txt
@@ -5,3 +5,6 @@
 aiohttp
 azure-ai-inference
 openai
+opentelemetry-sdk
+opentelemetry-exporter-otlp-proto-grpc
+azure-ai-ml
diff --git a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py
index 05bf8c76ed18..1da5f9cde2f4 100644
--- a/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py
+++ b/sdk/ai/azure-ai-projects/samples/inference/async_samples/sample_chat_completions_with_azure_openai_client_async.py
@@ -13,7 +13,7 @@
 
     Before running the sample:
 
-    pip install azure-ai-projects aiohttp openai_async
+    pip install azure-ai-projects aiohttp openai
 
     Set these environment variables with your own values:
     * PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index 563cc1239509..f2ba2ee56f93 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -18,6 +18,10 @@ if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") +def pytest_collection_modifyitems(items): + for item in items: + if 'tests\\agents' in item.fspath.strpath or 'tests/agents' in item.fspath.strpath: + item.add_marker(pytest.mark.skip(reason="Skip running Agents tests in PR pipeline until test recordings are available")) class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" From 6465546631f0344ac2fa78a617f8782eb185b35b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:32:38 -0800 Subject: [PATCH 121/138] run "black --config ../../../eng/black-pyproject.toml ." --- sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py | 4 +++- .../azure-ai-projects/azure/ai/projects/aio/_patch.py | 4 +++- .../azure/ai/projects/aio/operations/_patch.py | 4 +++- .../azure/ai/projects/operations/_patch.py | 8 ++++++-- sdk/ai/azure-ai-projects/tests/conftest.py | 10 +++++++--- 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index 5aa6b4569aae..ea4cd7bfb3be 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -24,7 +24,9 @@ from .operations._patch import InferenceOperations -class AIProjectClient(ClientGenerated): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes +class AIProjectClient( + ClientGenerated +): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes def __init__( # pylint: disable=super-init-not-called,too-many-statements self, endpoint: str, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index d2c480ac123f..b544d0913c79 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -30,7 +30,9 @@ from azure.core.credentials_async import AsyncTokenCredential -class AIProjectClient(ClientGenerated): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes +class AIProjectClient( + ClientGenerated +): # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes def __init__( # pylint: disable=super-init-not-called,too-many-statements self, endpoint: str, diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 541b51e7cdcf..d337f73e685f 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -398,7 +398,9 @@ async def get_connection_string(self) -> str: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = await self._outer_instance.connections._get_workspace() # pylint: disable=protected-access + get_workspace_response: GetWorkspaceResponse = ( + await 
self._outer_instance.connections._get_workspace() + ) # pylint: disable=protected-access if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index f384a1f52768..58687f19c8d8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -472,7 +472,9 @@ def _configure_logging(log_exporter: Any) -> None: from opentelemetry import _logs, _events from opentelemetry.sdk._logs import LoggerProvider # pylint: disable=import-error,no-name-in-module from opentelemetry.sdk._events import EventLoggerProvider # pylint: disable=import-error,no-name-in-module - from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor # pylint: disable=import-error,no-name-in-module + from opentelemetry.sdk._logs.export import ( + SimpleLogRecordProcessor, + ) # pylint: disable=import-error,no-name-in-module if not isinstance(_logs.get_logger_provider(), LoggerProvider): logger_provider = LoggerProvider() @@ -575,7 +577,9 @@ def get_connection_string(self) -> str: """ if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists - get_workspace_response: GetWorkspaceResponse = self._outer_instance.connections._get_workspace() # pylint: disable=protected-access + get_workspace_response: GetWorkspaceResponse = ( + self._outer_instance.connections._get_workspace() + ) # pylint: disable=protected-access if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") diff --git a/sdk/ai/azure-ai-projects/tests/conftest.py b/sdk/ai/azure-ai-projects/tests/conftest.py index f2ba2ee56f93..73e9337bffe9 100644 --- a/sdk/ai/azure-ai-projects/tests/conftest.py +++ b/sdk/ai/azure-ai-projects/tests/conftest.py @@ -18,10 +18,14 @@ if not load_dotenv(find_dotenv(filename="azure_ai_projects_tests.env"), override=True): print("Failed to apply environment variables for azure-ai-projects tests.") + def pytest_collection_modifyitems(items): - for item in items: - if 'tests\\agents' in item.fspath.strpath or 'tests/agents' in item.fspath.strpath: - item.add_marker(pytest.mark.skip(reason="Skip running Agents tests in PR pipeline until test recordings are available")) + for item in items: + if "tests\\agents" in item.fspath.strpath or "tests/agents" in item.fspath.strpath: + item.add_marker( + pytest.mark.skip(reason="Skip running Agents tests in PR pipeline until test recordings are available") + ) + class SanitizedValues: SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" From b210645f4355e025145ddc45d26b8061a8115de6 Mon Sep 17 00:00:00 2001 From: nick863 <30440255+nick863@users.noreply.github.com> Date: Wed, 13 Nov 2024 06:54:49 -0800 Subject: [PATCH 122/138] Fix pylint and new mypy issues (#38503) * Ignore most of pylint errors * Add generated paths to pylint.rc * Use backslash for consistency in pylintrc files --------- Co-authored-by: Darren Cohen <39422044+dargilco@users.noreply.github.com> --- eng/pylintrc | 1 + pylintrc | 1 + .../azure/ai/projects/aio/operations/_patch.py | 14 ++++++++++---- .../azure/ai/projects/operations/_patch.py | 10 ++++++++-- sdk/ai/azure-ai-projects/pyproject.toml | 7 ++++++- 5 files changed, 26 
insertions(+), 7 deletions(-) diff --git a/eng/pylintrc b/eng/pylintrc index 2346a3148a90..5fec693ee85b 100644 --- a/eng/pylintrc +++ b/eng/pylintrc @@ -7,6 +7,7 @@ reports=no ignore-paths= azure\\mixedreality\\remoterendering\\_api_version.py, azure/mixedreality/remoterendering/_api_version.py, + (?:.*[/\\]|^)projects/(models/_models.py|_model_base.py|operations/_operations.py|aio/operations/_operations.py)$, # Exclude any path that contains the following directory names (?:.*[/\\]|^)(?:_vendor|_generated|_restclient|samples|examples|test|tests|doc|\.tox)(?:[/\\]|$) diff --git a/pylintrc b/pylintrc index 480b4c27bcc0..8d64111ed85c 100644 --- a/pylintrc +++ b/pylintrc @@ -7,6 +7,7 @@ reports=no ignore-paths= azure\\mixedreality\\remoterendering\\_api_version.py, azure/mixedreality/remoterendering/_api_version.py, + (?:.*[/\\]|^)projects/(models/_models.py|_model_base.py|operations/_operations.py|aio/operations/_operations.py)$, # Exclude any path that contains the following directory names (?:.*[/\\]|^)(?:_vendor|_generated|_restclient|samples|examples|test|tests|doc|\.tox)(?:[/\\]|$) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index d337f73e685f..7c5ab2da74d5 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -399,8 +399,8 @@ async def get_connection_string(self) -> str: if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists get_workspace_response: GetWorkspaceResponse = ( - await self._outer_instance.connections._get_workspace() - ) # pylint: disable=protected-access + await self._outer_instance.connections._get_workspace() # pylint: disable=protected-access + ) if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") @@ -464,6 +464,7 @@ async def create_agent(self, body: JSON, *, content_type: str = "application/jso :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def create_agent( # pylint: disable=arguments-differ self, @@ -527,6 +528,7 @@ async def create_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def create_agent( # pylint: disable=arguments-differ self, @@ -697,6 +699,7 @@ async def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def update_agent( # pylint: disable=arguments-differ self, @@ -765,6 +768,7 @@ async def update_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def update_agent( # pylint: disable=arguments-differ self, @@ -991,6 +995,7 @@ async def create_run( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def create_run( # pylint: disable=arguments-differ self, @@ -1658,6 +1663,7 @@ async def submit_tool_outputs_to_run( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def submit_tool_outputs_to_run( # pylint: disable=arguments-differ self, @@ -1720,7 +1726,6 @@ async def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, 
tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: Optional[_models.AsyncAgentEventHandler] = None, # pylint: disable=unused-argument **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. Runs that need submitted tool @@ -1735,7 +1740,6 @@ async def submit_tool_outputs_to_run( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword event_handler: The event handler to use for processing events during the run. :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -1919,6 +1923,7 @@ async def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def upload_file( # pylint: disable=arguments-differ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any @@ -1937,6 +1942,7 @@ async def upload_file( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload async def upload_file( # pylint: disable=arguments-differ self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 58687f19c8d8..43c7ced11cbf 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -642,6 +642,7 @@ def create_agent(self, body: JSON, *, content_type: str = "application/json", ** :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def create_agent( # pylint: disable=arguments-differ self, @@ -705,6 +706,7 @@ def create_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def create_agent( # pylint: disable=arguments-differ self, @@ -875,6 +877,7 @@ def update_agent( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def update_agent( # pylint: disable=arguments-differ self, @@ -943,6 +946,7 @@ def update_agent( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def update_agent( # pylint: disable=arguments-differ self, @@ -1169,6 +1173,7 @@ def create_run( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def create_run( # pylint: disable=arguments-differ self, @@ -1856,6 +1861,7 @@ def submit_tool_outputs_to_run( :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def submit_tool_outputs_to_run( # pylint: disable=arguments-differ self, @@ -1918,7 +1924,6 @@ def submit_tool_outputs_to_run( body: Union[JSON, IO[bytes]] = _Unset, *, tool_outputs: List[_models.ToolOutput] = _Unset, - event_handler: Optional[_models.AgentEventHandler] = None, # pylint: disable=unused-argument **kwargs: Any, ) -> _models.ThreadRun: """Submits outputs from tools as requested by tool calls in a run. 
Runs that need submitted tool @@ -1933,7 +1938,6 @@ def submit_tool_outputs_to_run( :type body: JSON or IO[bytes] :keyword tool_outputs: Required. :paramtype tool_outputs: list[~azure.ai.projects.models.ToolOutput] - :keyword event_handler: The event handler to use for processing events during the run. :return: ThreadRun. The ThreadRun is compatible with MutableMapping :rtype: ~azure.ai.projects.models.ThreadRun :raises ~azure.core.exceptions.HttpResponseError: @@ -2117,6 +2121,7 @@ def upload_file(self, body: JSON, **kwargs: Any) -> _models.OpenAIFile: :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def upload_file( # pylint: disable=arguments-differ self, *, file: FileType, purpose: Union[str, _models.FilePurpose], filename: Optional[str] = None, **kwargs: Any @@ -2135,6 +2140,7 @@ def upload_file( # pylint: disable=arguments-differ :raises ~azure.core.exceptions.HttpResponseError: """ + # pylint: disable=arguments-differ @overload def upload_file( # pylint: disable=arguments-differ self, *, file_path: str, purpose: Union[str, _models.FilePurpose], **kwargs: Any diff --git a/sdk/ai/azure-ai-projects/pyproject.toml b/sdk/ai/azure-ai-projects/pyproject.toml index de7bf5a6932b..f81ce6125413 100644 --- a/sdk/ai/azure-ai-projects/pyproject.toml +++ b/sdk/ai/azure-ai-projects/pyproject.toml @@ -12,7 +12,12 @@ exclude = [ "sample_agents_with_file_search_attachment_async\\.py", "sample_agents_code_interpreter_attachment_enterprise_search_async\\.py", "sample_agents_code_interpreter_attachment_enterprise_search_async\\.py", - "sample_agents_code_interpreter_attachment_async\\.py" + "sample_agents_code_interpreter_attachment_async\\.py", + # Chat completion client expects list of parent class type and gets child type instead + "sample_chat_completions_with_azure_ai_inference_client_and_console_tracing\\.py", + "sample_chat_completions_with_azure_ai_inference_client_and_azure_monitor_tracing\\.py", + "sample_chat_completions_with_azure_ai_inference_client\\.py", + "sample_connections.py" ] warn_unused_configs = true ignore_missing_imports = true From bc0c7885c8bd4db1dcc2eb5f43eec74cac0fa62b Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 07:01:19 -0800 Subject: [PATCH 123/138] Remove async from .telemetry.enable (#38514) --- .../azure/ai/projects/aio/operations/_patch.py | 2 +- .../sample_agents_basics_async_with_console_tracing.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 7c5ab2da74d5..140d608f0464 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -416,7 +416,7 @@ async def get_connection_string(self) -> str: # TODO: what about `set AZURE_TRACING_GEN_AI_CONTENT_RECORDING_ENABLED=true`? # TODO: This could be a class method. But we don't have a class property AIProjectClient.telemetry - async def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: + def enable(self, *, destination: Union[TextIO, str, None] = None, **kwargs) -> None: """Enables distributed tracing and logging with OpenTelemetry for Azure AI clients and popular GenAI libraries. 
diff --git a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py index a0e6b26b6cad..9b667f4735f2 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py +++ b/sdk/ai/azure-ai-projects/samples/agents/async_samples/sample_agents_basics_async_with_console_tracing.py @@ -54,7 +54,7 @@ async def main() -> None: # Enable console tracing # or, if you have local OTLP endpoint running, change it to # project_client.telemetry.enable(destination="http://localhost:4317") - await project_client.telemetry.enable(destination=sys.stdout) + project_client.telemetry.enable(destination=sys.stdout) async with project_client: agent = await project_client.agents.create_agent( From fad94f93a5d997dbc0337cc74b0533165293cffd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:03:02 -0800 Subject: [PATCH 124/138] Fix pylint --- .../azure-ai-projects/azure/ai/projects/operations/_patch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 43c7ced11cbf..54c33d39f3ab 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -578,8 +578,8 @@ def get_connection_string(self) -> str: if not self._connection_string: # Get the AI Studio Project properties, including Application Insights resource URL if exists get_workspace_response: GetWorkspaceResponse = ( - self._outer_instance.connections._get_workspace() - ) # pylint: disable=protected-access + self._outer_instance.connections._get_workspace() # pylint: disable=protected-access + ) if not get_workspace_response.properties.application_insights: raise ResourceNotFoundError("Application Insights resource was not enabled for this Project.") From f6dc2057a30038421be3718e9f2ad9b25960e3fd Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:40:03 -0800 Subject: [PATCH 125/138] Rename `with_credentials` to `include_credentials` per apiview.dev review comment (#38517) --- .../ai/projects/aio/operations/_patch.py | 28 +++++++++---------- .../azure/ai/projects/models/_patch.py | 2 +- .../azure/ai/projects/operations/_patch.py | 28 +++++++++---------- .../async_samples/sample_connections_async.py | 4 +-- .../samples/connections/sample_connections.py | 4 +-- .../tests/connections/connection_test_base.py | 4 +-- .../tests/connections/test_connections.py | 24 ++++++++-------- .../connections/test_connections_async.py | 24 ++++++++-------- 8 files changed, 59 insertions(+), 59 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 140d608f0464..285de5ffb9a8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -91,11 +91,11 @@ async def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient" if use_serverless_connection: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + 
connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs ) else: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs ) try: @@ -166,11 +166,11 @@ async def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": if use_serverless_connection: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs ) else: connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs ) try: @@ -233,7 +233,7 @@ async def get_azure_openai_client(self, *, api_version: Optional[str] = None, ** """ kwargs.setdefault("merge_span", True) connection = await self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs ) try: @@ -279,7 +279,7 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace_async async def get_default( - self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any + self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError @@ -287,9 +287,9 @@ async def get_default( :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + :keyword include_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool + :type include_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. :rtype: ~azure.ai.projects.model.ConnectionProperties :raises ~azure.core.exceptions.ResourceNotFoundError: @@ -302,24 +302,24 @@ async def get_default( # and return the first one connection_properties_list = await self.list(connection_type=connection_type, **kwargs) if len(connection_properties_list) > 0: - if with_credentials: + if include_credentials: return await self.get( - connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs + connection_name=connection_properties_list[0].name, include_credentials=include_credentials, **kwargs ) return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace_async - async def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + async def get(self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. 
Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. :keyword connection_name: Connection Name. Required. :type connection_name: str - :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + :keyword include_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool + :type include_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. :rtype: ~azure.ai.projects.models.ConnectionProperties :raises ~azure.core.exceptions.ResourceNotFoundError: @@ -328,7 +328,7 @@ async def get(self, *, connection_name: str, with_credentials: bool = False, **k kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Endpoint name cannot be empty") - if with_credentials: + if include_credentials: connection: GetConnectionResponse = await self._get_connection_with_secrets( connection_name=connection_name, ignored="ignore", **kwargs ) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 6dd737fcd3c1..994c80ba60b3 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -240,7 +240,7 @@ def _refresh_token(self) -> None: project_name=self._project_name, ) - connection = project_client.connections.get(connection_name=self._connection_name, with_credentials=True) + connection = project_client.connections.get(connection_name=self._connection_name, include_credentials=True) self._sas_token = "" if connection is not None and connection.token_credential is not None: diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 54c33d39f3ab..14b5bf6874fe 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -81,11 +81,11 @@ def get_chat_completions_client(self, **kwargs) -> "ChatCompletionsClient": if use_serverless_connection: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs ) else: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs ) try: @@ -155,11 +155,11 @@ def get_embeddings_client(self, **kwargs) -> "EmbeddingsClient": if use_serverless_connection: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.SERVERLESS, with_credentials=True, **kwargs + connection_type=ConnectionType.SERVERLESS, include_credentials=True, **kwargs ) else: connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=True, **kwargs + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=True, **kwargs ) try: @@ -223,7 +223,7 @@ def get_azure_openai_client(self, *, api_version: Optional[str] = None, **kwargs kwargs.setdefault("merge_span", True) connection = self._outer_instance.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=True, 
**kwargs + connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=True, **kwargs ) try: @@ -270,7 +270,7 @@ class ConnectionsOperations(ConnectionsOperationsGenerated): @distributed_trace def get_default( - self, *, connection_type: ConnectionType, with_credentials: bool = False, **kwargs: Any + self, *, connection_type: ConnectionType, include_credentials: bool = False, **kwargs: Any ) -> ConnectionProperties: """Get the properties of the default connection of a certain connection type, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError @@ -278,9 +278,9 @@ def get_default( :keyword connection_type: The connection type. Required. :type connection_type: ~azure.ai.projects.models._models.ConnectionType - :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + :keyword include_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool + :type include_credentials: bool :return: The connection properties, or `None` if there are no connections of the specified type. :rtype: ~azure.ai.projects.models.ConnectionProperties :raises ~azure.core.exceptions.ResourceNotFoundError: @@ -293,24 +293,24 @@ def get_default( # and return the first one connection_properties_list = self.list(connection_type=connection_type, **kwargs) if len(connection_properties_list) > 0: - if with_credentials: + if include_credentials: return self.get( - connection_name=connection_properties_list[0].name, with_credentials=with_credentials, **kwargs + connection_name=connection_properties_list[0].name, include_credentials=include_credentials, **kwargs ) return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace - def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + def get(self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. :keyword connection_name: Connection Name. Required. :type connection_name: str - :keyword with_credentials: Whether to populate the connection properties with authentication credentials. + :keyword include_credentials: Whether to populate the connection properties with authentication credentials. Optional. - :type with_credentials: bool + :type include_credentials: bool :return: The connection properties, or `None` if a connection with this name does not exist. 
:rtype: ~azure.ai.projects.models.ConnectionProperties :raises ~azure.core.exceptions.ResourceNotFoundError: @@ -319,7 +319,7 @@ def get(self, *, connection_name: str, with_credentials: bool = False, **kwargs: kwargs.setdefault("merge_span", True) if not connection_name: raise ValueError("Connection name cannot be empty") - if with_credentials: + if include_credentials: connection: GetConnectionResponse = self._get_connection_with_secrets( connection_name=connection_name, ignored="ignore", **kwargs ) diff --git a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py index 79e54f4a4532..b7392acb05ce 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py +++ b/sdk/ai/azure-ai-projects/samples/connections/async_samples/sample_connections_async.py @@ -62,7 +62,7 @@ async def sample_connections_async() -> None: # Get the properties of the default connection of a particular "type", with credentials connection = await project_client.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, - with_credentials=True, # Optional. Defaults to "False" + include_credentials=True, # Optional. Defaults to "False" ) print("====> Get default Azure AI Services connection:") print(connection) @@ -70,7 +70,7 @@ async def sample_connections_async() -> None: # Get the properties of a connection by connection name: connection = await project_client.connections.get( connection_name=connection_name, - with_credentials=True, # Optional. Defaults to "False" + include_credentials=True, # Optional. Defaults to "False" ) print("====> Get connection by name:") print(connection) diff --git a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py index abff4010a031..0bd3fb14ab46 100644 --- a/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py +++ b/sdk/ai/azure-ai-projects/samples/connections/sample_connections.py @@ -58,14 +58,14 @@ # Get the properties of the default connection of a particular "type", with credentials connection = project_client.connections.get_default( connection_type=ConnectionType.AZURE_AI_SERVICES, - with_credentials=True, # Optional. Defaults to "False" + include_credentials=True, # Optional. Defaults to "False" ) print("====> Get default Azure AI Services connection:") print(connection) # Get the properties of a connection by connection name: connection = project_client.connections.get( - connection_name=connection_name, with_credentials=True # Optional. Defaults to "False" + connection_name=connection_name, include_credentials=True # Optional. 
Defaults to "False" ) print("====> Get connection by name:") print(connection) diff --git a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py index c44ece6b70e0..f1a00a80aa3a 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py +++ b/sdk/ai/azure-ai-projects/tests/connections/connection_test_base.py @@ -68,7 +68,7 @@ def get_async_client(self, **kwargs) -> AIProjectClientAsync: def validate_connection( cls, connection: ConnectionProperties, - with_credentials: bool, + include_credentials: bool, *, expected_connection_type: ConnectionType = None, expected_connection_name: str = None, @@ -91,7 +91,7 @@ def validate_connection( else: assert connection.authentication_type is not None - if with_credentials: + if include_credentials: assert (connection.key is not None) ^ (connection.token_credential is not None) else: assert connection.key == None diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py index 9bc7d4f90e6e..2008c77fce1e 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections.py @@ -21,12 +21,12 @@ def test_connections_get(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - for with_credentials in [True, False]: + for include_credentials in [True, False]: try: _ = project_client.connections.get( connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, - with_credentials=with_credentials, + include_credentials=include_credentials, ) assert False except ResourceNotFoundError as e: @@ -34,23 +34,23 @@ def test_connections_get(self, **kwargs): assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message connection = project_client.connections.get( - connection_name=aoai_connection, with_credentials=with_credentials + connection_name=aoai_connection, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) connection = project_client.connections.get( - connection_name=aiservices_connection, with_credentials=with_credentials + connection_name=aiservices_connection, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=aiservices_connection, expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @@ -65,11 +65,11 @@ def test_connections_get_default(self, **kwargs): with self.get_sync_client(**kwargs) as project_client: - for with_credentials in [True, False]: + for include_credentials in [True, False]: try: _ = project_client.connections.get_default( connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, - with_credentials=with_credentials, + include_credentials=include_credentials, ) assert False except ResourceNotFoundError as e: @@ -77,23 +77,23 @@ def test_connections_get_default(self, **kwargs): assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=with_credentials + connection_type=ConnectionType.AZURE_OPEN_AI, 
include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) connection = project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=with_credentials + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) diff --git a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py index 0c7ba272af09..a82482947a2c 100644 --- a/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py +++ b/sdk/ai/azure-ai-projects/tests/connections/test_connections_async.py @@ -20,11 +20,11 @@ async def test_connections_get_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - for with_credentials in [True, False]: + for include_credentials in [True, False]: try: _ = await project_client.connections.get( connection_name=ConnectionsTestBase.NON_EXISTING_CONNECTION_NAME, - with_credentials=with_credentials, + include_credentials=include_credentials, ) assert False except ResourceNotFoundError as e: @@ -32,23 +32,23 @@ async def test_connections_get_async(self, **kwargs): assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_NAME in e.message connection = await project_client.connections.get( - connection_name=aoai_connection, with_credentials=with_credentials + connection_name=aoai_connection, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) connection = await project_client.connections.get( - connection_name=aiservices_connection, with_credentials=with_credentials + connection_name=aiservices_connection, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=aiservices_connection, expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) @@ -62,11 +62,11 @@ async def test_connections_get_default_async(self, **kwargs): async with self.get_async_client(**kwargs) as project_client: - for with_credentials in [True, False]: + for include_credentials in [True, False]: try: _ = await project_client.connections.get_default( connection_type=ConnectionsTestBase.NON_EXISTING_CONNECTION_TYPE, - with_credentials=with_credentials, + include_credentials=include_credentials, ) assert False except ResourceNotFoundError as e: @@ -74,23 +74,23 @@ async def test_connections_get_default_async(self, **kwargs): assert ConnectionsTestBase.EXPECTED_EXCEPTION_MESSAGE_FOR_NON_EXISTING_CONNECTION_TYPE in e.message connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_OPEN_AI, with_credentials=with_credentials + connection_type=ConnectionType.AZURE_OPEN_AI, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + 
include_credentials, expected_connection_name=default_aoai_connection, expected_connection_type=ConnectionType.AZURE_OPEN_AI, ) connection = await project_client.connections.get_default( - connection_type=ConnectionType.AZURE_AI_SERVICES, with_credentials=with_credentials + connection_type=ConnectionType.AZURE_AI_SERVICES, include_credentials=include_credentials ) print(connection) ConnectionsTestBase.validate_connection( connection, - with_credentials, + include_credentials, expected_connection_name=default_serverless_connection, expected_connection_type=ConnectionType.AZURE_AI_SERVICES, ) From 4e018c4219ed13247a075a208d2982db69c693e6 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 10:22:37 -0800 Subject: [PATCH 126/138] Try to fix test issues on Python 3.13. Run black --- .../azure/ai/projects/aio/operations/_patch.py | 8 ++++++-- .../azure/ai/projects/operations/_patch.py | 4 +++- sdk/ai/azure-ai-projects/dev_requirements.txt | 5 ++++- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py index 285de5ffb9a8..4ac5ee2a4d32 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/operations/_patch.py @@ -304,13 +304,17 @@ async def get_default( if len(connection_properties_list) > 0: if include_credentials: return await self.get( - connection_name=connection_properties_list[0].name, include_credentials=include_credentials, **kwargs + connection_name=connection_properties_list[0].name, + include_credentials=include_credentials, + **kwargs, ) return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") @distributed_trace_async - async def get(self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any) -> ConnectionProperties: + async def get( + self, *, connection_name: str, include_credentials: bool = False, **kwargs: Any + ) -> ConnectionProperties: """Get the properties of a single connection, given its connection name, with or without populating authentication credentials. Raises ~azure.core.exceptions.ResourceNotFoundError exception if a connection with the given name was not found. 
diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py index 14b5bf6874fe..18cd2f4463bd 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/operations/_patch.py @@ -295,7 +295,9 @@ def get_default( if len(connection_properties_list) > 0: if include_credentials: return self.get( - connection_name=connection_properties_list[0].name, include_credentials=include_credentials, **kwargs + connection_name=connection_properties_list[0].name, + include_credentials=include_credentials, + **kwargs, ) return connection_properties_list[0] raise ResourceNotFoundError(f"No connection of type {connection_type} found") diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index 0b28efcde9bc..128de4598b61 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -4,7 +4,10 @@ ../../core/azure-core-tracing-opentelemetry aiohttp azure-ai-inference -openai +# Pipeline has some issues with loading openai dependencies when using Python 3.13. Since we only use openai +# in live tests without recordings, we don't have pipeline tests that use openai. So we can exclude if from here, +# or only declare dependency for Python version earlier than 3.13, since we still want to run tests locally. +openai; python_version <= '3.12' opentelemetry-sdk opentelemetry-exporter-otlp-proto-grpc azure-ai-ml From 5fe839cfd97432693f6f2b8bf18323a64eed3882 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:22:44 -0800 Subject: [PATCH 127/138] Try again to fix Python 3.13 test failure --- sdk/ai/azure-ai-projects/dev_requirements.txt | 5 +---- sdk/ai/azure-ai-projects/setup.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/sdk/ai/azure-ai-projects/dev_requirements.txt b/sdk/ai/azure-ai-projects/dev_requirements.txt index 128de4598b61..0b28efcde9bc 100644 --- a/sdk/ai/azure-ai-projects/dev_requirements.txt +++ b/sdk/ai/azure-ai-projects/dev_requirements.txt @@ -4,10 +4,7 @@ ../../core/azure-core-tracing-opentelemetry aiohttp azure-ai-inference -# Pipeline has some issues with loading openai dependencies when using Python 3.13. Since we only use openai -# in live tests without recordings, we don't have pipeline tests that use openai. So we can exclude if from here, -# or only declare dependency for Python version earlier than 3.13, since we still want to run tests locally. 
-openai; python_version <= '3.12' +openai opentelemetry-sdk opentelemetry-exporter-otlp-proto-grpc azure-ai-ml diff --git a/sdk/ai/azure-ai-projects/setup.py b/sdk/ai/azure-ai-projects/setup.py index 2f225140b1e5..d260292fd47b 100644 --- a/sdk/ai/azure-ai-projects/setup.py +++ b/sdk/ai/azure-ai-projects/setup.py @@ -66,7 +66,7 @@ install_requires=[ "isodate>=0.6.1", "azure-core>=1.30.0", - "typing-extensions>=4.6.0", + "typing-extensions>=4.12.2", ], python_requires=">=3.8", ) From 355e68bde94643239e1787865c639b9e22762f51 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:08:23 -0800 Subject: [PATCH 128/138] Attempt to fix azure-ai-projects not showing up in the release pipeline --- sdk/ai/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/ai/ci.yml b/sdk/ai/ci.yml index 6ee45adc4d54..613dfe7015f5 100644 --- a/sdk/ai/ci.yml +++ b/sdk/ai/ci.yml @@ -46,6 +46,8 @@ extends: # Selection: sparse # GenerateVMJobs: true Artifacts: + - name: azure-ai-projects + safeName: azureaiprojects - name: azure-ai-inference safeName: azureaiinference - name: azure-ai-generative From 83a4fa57e358b27a155be17bf757e9e5e941e8db Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:12:50 -0800 Subject: [PATCH 129/138] Fix release date in CHANGELOG.md --- sdk/ai/azure-ai-projects/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 628743d283a9..779d56342466 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.0b1 (1970-01-01) +## 1.0.0b1 (2024-11-15) - Initial version From a201c19fe5cf1916adf260297b3ae8ee04bc5008 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:40:18 -0800 Subject: [PATCH 130/138] Fix CHANGELOG.md --- sdk/ai/azure-ai-projects/CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sdk/ai/azure-ai-projects/CHANGELOG.md b/sdk/ai/azure-ai-projects/CHANGELOG.md index 779d56342466..669b7c543a74 100644 --- a/sdk/ai/azure-ai-projects/CHANGELOG.md +++ b/sdk/ai/azure-ai-projects/CHANGELOG.md @@ -2,4 +2,6 @@ ## 1.0.0b1 (2024-11-15) -- Initial version +### Features Added + +First beta version From 2c084bf25a3480dc4b8d0aed6bdbf38fd5f99241 Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:44:26 -0800 Subject: [PATCH 131/138] README update. Address PR review comment --- sdk/ai/azure-ai-projects/README.md | 262 ++++++++++++------ .../azure/ai/projects/_patch.py | 3 +- .../azure/ai/projects/aio/_patch.py | 3 +- 3 files changed, 181 insertions(+), 87 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index c0f46f0174a9..93a543f1c9a2 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -1,53 +1,188 @@ # Azure AI Projects client library for Python - + +Use the AI Projects client library (in preview) to: + +* **Enumerate connections** in your Azure AI Studio project and get connection properties. +For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection. +* **Get an already-authenticated Inference client** for the default Azure OpenAI or AI Services connections in your Azure AI Studio project. 
Supports the AzureOpenAI client from the `openai` package, or clients from the `azure-ai-inference` package.
+* **Develop agents using the Azure AI Agent Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agent Service enables the building of agents for a wide range of generative AI use cases. The package is currently in private preview.
+* TODO: Evaluations
+* **Enable OpenTelemetry tracing**.
+
+[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc)
+| [Samples](https://aka.ms/azsdk/azure-ai-projects/python/samples)
+| [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference)
+| [Package (Pypi)](https://aka.ms/azsdk/azure-ai-projects/python/package)
+| [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code)

 ## Getting started

+### Prequisites
+
+- Python 3.8 or later.
+- An [Azure subscription][azure_sub].
+- A [project in Azure AI Studio](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-projects?tabs=ai-studio).
+- The project connection string. It can be found in your Azure AI Studio project overview page, under "Project details". Below we will assume the environment variable `PROJECT_CONNECTION_STRING` was defined to hold this value.
+- Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need:
+  * The role `Azure AI Developer` assigned to you. Role assignment can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal.
+  * The token must have the scope `https://management.azure.com/.default` or `https://ml.azure.com/.default`, depending on the set of client operations you will execute.
+  * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed.
+  * You are logged into your Azure account by running `az login`.
+  * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all you subscription and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.
+
 ### Install the package

 ```bash
-python -m pip install azure-ai-projects
+pip install azure-ai-projects
 ```

-#### Prequisites
+## Key concepts

-- Python 3.8 or later is required to use this package.
-- You need an [Azure subscription][azure_sub] to use this package.
-- An existing Azure AI Projects instance.
+### Create and authenticate the client

-#### Create with an Azure Active Directory Credential
-To use an [Azure Active Directory (AAD) token credential][authenticate_with_token],
-provide an instance of the desired credential type obtained from the
-[azure-identity][azure_identity_credentials] library.
+The class factory method `from_connection_string` is used to construct the client. To construct a synchronous client:

-To authenticate with AAD, you must first [pip][pip] install [`azure-identity`][azure_identity_pip]

-After setup, you can choose which type of [credential][azure_identity_credentials] from azure.identity to use.
-As an example, [DefaultAzureCredential][default_azure_credential] can be used to authenticate the client:
+```python
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential

-Set the values of the client ID, tenant ID, and client secret of the AAD application as environment variables:
-`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+```

-Use the returned token credential to authenticate the client:
+To construct an asynchronous client, install the additional package [aiohttp](https://pypi.org/project/aiohttp/):

-```python
->>> from azure.ai.projects import AIProjectClient
->>> from azure.identity import DefaultAzureCredential
->>> client = AIProjectClient(endpoint='', credential=DefaultAzureCredential())
+```bash
+pip install aiohttp
 ```

-## Key concepts
+and update the code above to import `asyncio`, and import `AIProjectClient` and `DefaultAzureCredential` from the `azure.ai.projects.aio` and `azure.identity.aio` namespaces:

-TODO
+```python
+import os
+import asyncio
+from azure.ai.projects.aio import AIProjectClient
+from azure.identity.aio import DefaultAzureCredential
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+```

 ## Examples

 ### Enumerate connections

+Your Azure AI Studio project has a "Management center". When you enter it, you will see a tab named "Connected resources" under your project. The `.connections` operations on the client allow you to enumerate the connections and get connection properties. Connection properties include the resource URL and authentication credentials, among other things.
+
+#### Get properties of all connections
+
+To list the properties of all the connections in the Azure AI Studio project:
+
+```python
+connections = project_client.connections.list()
+for connection in connections:
+    print(connection)
+```
+
+#### Get properties of all connections of a particular type
+
+To list the properties of connections of a certain type (here Azure OpenAI):
+
+```python
+connections = project_client.connections.list(
+    connection_type=ConnectionType.AZURE_OPEN_AI,
+)
+for connection in connections:
+    print(connection)
+
+```
+
+#### Get properties of a default connection
+
+To get the properties of the default connection of a certain type (here Azure OpenAI),
+with its authentication credentials:
+
+```python
+connection = project_client.connections.get_default(
+    connection_type=ConnectionType.AZURE_OPEN_AI,
+    include_credentials=True, # Optional. Defaults to "False"
+)
+print(connection)
+```
+
+If the call was made with `include_credentials=True`, depending on the value of `connection.authentication_type`, either `connection.key` or `connection.token_credential`
+will be populated. Otherwise both will be `None`.
+
+#### Get proprties of a connection by its connection name
+
+To get the connection properties of a connection with name `connection_name`:
+
+```python
+connection = project_client.connections.get(
+    connection_name=connection_name, include_credentials=True # Optional. Defaults to "False"
+)
+print(connection)
+```
+
+### Get an authenticated ChatCompletionsClient
+
+Your Azure AI Studio project may have one or more AI models deployed that support chat completions. These could be OpenAI models, Microsoft models, or models from other providers.
Use the code below to get an already authenticated [ChatCompletionsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.chatcompletionsclient?view=azure-python-preview) from the [azure-ai-inference](https://pypi.org/project/azure-ai-inference/) package, and excute a chat completions call. First, install the package: + +```bash +pip insall azure-ai-inference +``` + +Then run this code: + +```python +inference_client = project_client.inference.get_chat_completions_client() + +response = inference_client.complete( + model="gpt-4o", + messages=[UserMessage(content="How many feet are in a mile?")] +) + +print(response.choices[0].message.content) +``` + +### Get an authenticated AzureOpenAI client + +Your Azure AI Studio project may have one or more OpenAI models deployed that support chat completions. Use the code below to get an already authenticated [AzureOpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai) from the [openai](https://pypi.org/project/openai/) package, and excute a chat completions call. First, install the package: + +```bash +pip insall openai +``` + +Then run this code: + +```python +aoai_client = project_client.inference.get_azure_openai_client(api_version="2024-06-01") + +response = aoai_client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "How many feet are in a mile?", + }, + ], +) + +print(response.choices[0].message.content) +``` + ### Agents (Private Preview) + Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with agents: Agents are actively being developed. A sign-up form for private preview is coming soon. - - Create project client - Create agent with: - File Search - Code interpreter @@ -65,49 +200,9 @@ Agents are actively being developed. A sign-up form for private preview is comin - Tear down by deleting resource - Tracing - -#### Create Project Client - -When you create a project client, you need to make the decision to use synchronous or asynchronous client. Use either: - -```python -from azure.ai.projects import AIProjectClient -# OR -from azure.ai.projects.aio import AIProjectClient -``` - -Here is an example of creating a project client: - - -```python -project_client = AIProjectClient.from_connection_string( - credential=DefaultAzureCredential(), - conn_str=os.environ["PROJECT_CONNECTION_STRING"], -) -``` - - - -Because the client is under resource and context manager, you are required to use `with` or `async with` to consume the client object: - -```python -# For synchronous -with project_client: - agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", - name="my-assistant", - instructions="You are helpful assistant" - ) - -``` - -In the sections below, we will only provide code snippets in synchronous functions. - #### Create Agent -Now you should have your project client. From the project client, you create an agent to serve the end users. - -Here is an example of create an agent: +Here is an example of how to create an agent: ```python @@ -118,7 +213,7 @@ agent = project_client.agents.create_agent( -To allow agents to access your resources or custom functions, you need tools. 
You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`. +To allow agents to access your resources or custom functions, you need tools. You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`. Here is an example of `toolset`: @@ -158,11 +253,11 @@ agent = project_client.agents.create_agent( -In the following sections, we show you code snips in either `toolset` or combination of `tools` and `tool_resources`. But you are welcome to use another approach. +In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. But you are welcome to use another approach. #### Create Agent with File Search -To perform file search by an agent, we first need to upload a file, create a vector store, and associate the file to the vector store. -Here is an example: + +To perform file search by an agent, we first need to upload a file, create a vector store, and associate the file to the vector store. Here is an example: @@ -187,7 +282,6 @@ agent = project_client.agents.create_agent( - #### Create Agent with Code Interpreter Here is an example to upload a file and use it for code interpreter by an agent: @@ -214,8 +308,8 @@ agent = project_client.agents.create_agent( - #### Create Agent with Bing Grounding + To enable your agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. Here is an example: @@ -245,6 +339,7 @@ with project_client: #### Create Agent with Azure AI Search + Azure AI Search is an enterprise search system for high-performance applications. It integrates with Azure OpenAI Service and Azure Machine Learning, offering advanced search technologies like vector search and full-text search. Ideal for knowledge base insights, information discovery, and automation Here is an example to integrate Azure AI Search: @@ -284,7 +379,7 @@ You can enhance your agents by defining callback functions as function tools. Th - `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. - `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. - + For more details about calling functions by code, refer to [`sample_agents_stream_eventhandler_with_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_stream_eventhandler_with_functions.py) and [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). Here is an example to use [user functions](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/user_functions.py) in `toolset`: @@ -339,7 +434,6 @@ thread = project_client.agents.create_thread() In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. 
In the following example, you create a vector store and upload a file, enable an agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument.
-

 ```python
@@ -380,6 +474,7 @@ message = project_client.agents.create_message(thread_id=thread.id, role="user",

 #### Create Message with File Search Attachment
+
 To attach a file to a message for content searching, you use `MessageAttachment` and `FileSearchTool`:

@@ -394,7 +489,8 @@ message = project_client.agents.create_message(

 #### Create Message with Code Interpreter Attachment
-To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter.
+
+To attach a file to a message for data analysis, you use `MessageAttachment` and `CodeInterpreterTool`. You must pass `CodeInterpreterTool` as `tools` or `toolset` in `create_agent` call or the file attachment cannot be opened for code interpreter.

 Here is an example to pass `CodeInterpreterTool` as tool:

@@ -462,7 +558,7 @@ run = project_client.agents.create_and_process_run(thread_id=thread.id, assistan

-With streaming, polling also need not be considered. If `function tools` are provided as `toolset` during the `create_agent` call, they will be invoked by the SDK.
+With streaming, polling also need not be considered. If `function tools` are provided as `toolset` during the `create_agent` call, they will be invoked by the SDK.

 Here is an example:

@@ -510,7 +606,6 @@ class MyEventHandler(AgentEventHandler):

-
 #### Retrieve Message

 To retrieve messages from agents, use the following example:

@@ -612,7 +707,7 @@ print("Deleted agent")

 #### Tracing

-As part of Azure AI project, you can use the its connection string and observe the full execution path through Azure Monitor. Typically you might want to start tracing before you create an agent.
+As part of Azure AI project, you can use the its connection string and observe the full execution path through Azure Monitor. Typically you might want to start tracing before you create an agent.

 ##### Installation

@@ -631,8 +726,9 @@ To connect to Aspire Dashboard or another OpenTelemetry compatible backend, inst
 pip install opentelemetry-exporter-otlp
 ```

-##### Examples
-Here is a code snip to be included above `create_agent`:
+##### Tracing example
+
+Here is a code sample to be included above `create_agent`:

@@ -657,7 +753,7 @@ with tracer.start_as_current_span(scenario):

-In additional, you might find helpful to see the tracing logs in console. You can achieve by the following code:
+In addition, you might find it helpful to see the tracing logs in the console. You can achieve this with the following code:

 ```python
 project_client.telemetry.enable(destination=sys.stdout)

@@ -675,7 +771,7 @@ from azure.core.exceptions import HttpResponseError
 ...
 try:
-    result = client.connections.list()
+    result = project_client.connections.list()
 except HttpResponseError as e:
     print(f"Status code: {e.status_code} ({e.reason})")
     print(e.message)

@@ -717,9 +813,9 @@ logger.addHandler(handler)

 By default logs redact the values of URL query strings, the values of some HTTP request and response headers (including `Authorization` which holds the key or token), and the request and response payloads.
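For reference, the `logger` and `handler` objects mentioned above come from the standard-library `logging` module; a typical configuration might look like the sketch below. This is illustrative only — the exact level and handler are application choices:

```python
import logging
import sys

# The Azure SDK for Python emits its logs through the shared "azure" logger namespace.
logger = logging.getLogger("azure")

# DEBUG level includes the HTTP request/response traces discussed above.
logger.setLevel(logging.DEBUG)

# Direct output to stdout; any standard logging handler could be used instead.
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
```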
To create logs without redaction, add `logging_enable = True` to the client constructor: ```python -client = AIProjectClient.from_connection_string( +project_client = AIProjectClient.from_connection_string( credential=DefaultAzureCredential(), - conn_str=project_connection_string, + conn_str=os.environ["PROJECT_CONNECTION_STRING"], logging_enable = True ) ``` @@ -757,7 +853,7 @@ additional questions or comments. [code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ -[authenticate_with_token]: https://docs.microsoft.com/azure/cognitive-services/authentication?tabs=powershell#authenticate-with-an-authentication-token +[entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id [azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials [azure_identity_pip]: https://pypi.org/project/azure-identity/ [default_azure_credential]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#defaultazurecredential diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py index ea4cd7bfb3be..8f825c5415c8 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/_patch.py @@ -10,7 +10,6 @@ from os import PathLike from pathlib import Path from typing import Any, Dict, List, Tuple, Union - from typing_extensions import Self from azure.core import PipelineClient @@ -224,7 +223,7 @@ def __exit__(self, *exc_details: Any) -> None: self._client3.__exit__(*exc_details) @classmethod - def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> "AIProjectClient": + def from_connection_string(cls, conn_str: str, credential: "TokenCredential", **kwargs) -> Self: """ Create an AIProjectClient from a connection string. diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py index b544d0913c79..a522fe6ac71a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/aio/_patch.py @@ -12,7 +12,6 @@ from os import PathLike from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union - from typing_extensions import Self from azure.core import AsyncPipelineClient @@ -224,7 +223,7 @@ async def __aexit__(self, *exc_details: Any) -> None: await self._client3.__aexit__(*exc_details) @classmethod - def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> "AIProjectClient": + def from_connection_string(cls, conn_str: str, credential: "AsyncTokenCredential", **kwargs) -> Self: """ Create an asynchronous AIProjectClient from a connection string. 
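The switch above from the string annotation `"AIProjectClient"` to `typing_extensions.Self` in both `from_connection_string` overrides matters for subclassing, and is what the earlier `typing-extensions>=4.12.2` pin supports. A reduced illustration of the difference (the class names here are made up for the example, not taken from the patch):

```python
from typing_extensions import Self


class BaseClient:
    def __init__(self, conn_str: str) -> None:
        self.conn_str = conn_str

    @classmethod
    def from_connection_string(cls, conn_str: str) -> Self:
        # With `Self`, type checkers bind the return type to the class the
        # method is called on, so subclasses don't need to redeclare it.
        return cls(conn_str)


class CustomClient(BaseClient):
    pass


# Inferred as CustomClient, not BaseClient, thanks to the Self annotation.
client = CustomClient.from_connection_string("host;subscription;group;project")
```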
From 471053cd467814821b58a3188ea7867797044475 Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 13 Nov 2024 17:46:56 -0800
Subject: [PATCH 132/138] Update code snippets in package README.md

---
 sdk/ai/azure-ai-projects/README.md | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md
index 93a543f1c9a2..e6ad5fe821ac 100644
--- a/sdk/ai/azure-ai-projects/README.md
+++ b/sdk/ai/azure-ai-projects/README.md
@@ -207,7 +207,9 @@ Here is an example of how to create an agent:

 ```python
 agent = project_client.agents.create_agent(
-    model="gpt-4-1106-preview", name="my-assistant", instructions="You are helpful assistant"
+    model="gpt-4-1106-preview",
+    name="my-assistant",
+    instructions="You are a helpful assistant",
 )
 ```

@@ -614,8 +616,13 @@ To retrieve messages from agents, use the following example:

 ```python
 messages = project_client.agents.list_messages(thread_id=thread.id)
-last_message_content = messages.data[-1].content[-1].text.value
-print(f"Last message content: {last_message_content}")
+
+# The messages are listed in reverse order;
+# iterate over them and output only the text contents.
+for data_point in reversed(messages.data):
+    last_message_content = data_point.content[-1]
+    if isinstance(last_message_content, MessageTextContent):
+        print(f"{data_point.role}: {last_message_content.text.value}")
 ```

From 2364c3f8cc4fdb4ebd7515774ba3f2ff598581dd Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Wed, 13 Nov 2024 23:42:13 -0800
Subject: [PATCH 133/138] More README.md fixes

---
 sdk/ai/azure-ai-projects/README.md | 48 +++++++++++++++---------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md
index e6ad5fe821ac..7ffee4b12bc2 100644
--- a/sdk/ai/azure-ai-projects/README.md
+++ b/sdk/ai/azure-ai-projects/README.md
@@ -6,19 +6,19 @@ Use the AI Projects client library (in preview) to:

 * **Enumerate connections** in your Azure AI Studio project and get connection properties.
 For example, get the inference endpoint URL and credentials associated with your Azure OpenAI connection.
 * **Get an already-authenticated Inference client** for the default Azure OpenAI or AI Services connections in your Azure AI Studio project. Supports the AzureOpenAI client from the `openai` package, or clients from the `azure-ai-inference` package.
-* **Develop agents using the Azure AI Agent Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agent Service enables the building of agents for a wide range of generative AI use cases. The package is currently in private preview.
+* **Develop Agents using the Azure AI Agent Service**, leveraging an extensive ecosystem of models, tools, and capabilities from OpenAI, Microsoft, and other LLM providers. The Azure AI Agent Service enables the building of Agents for a wide range of generative AI use cases. The package is currently in private preview.
[Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc)
 | [Samples](https://aka.ms/azsdk/azure-ai-projects/python/samples)
 | [API reference documentation](https://aka.ms/azsdk/azure-ai-projects/python/reference)
-| [Package (Pypi)](https://aka.ms/azsdk/azure-ai-projects/python/package)
+| [Package (PyPI)](https://aka.ms/azsdk/azure-ai-projects/python/package)
 | [SDK source code](https://aka.ms/azsdk/azure-ai-projects/python/code)

 ## Getting started

-### Prequisites
+### Prerequisites

 - Python 3.8 or later.
 - An [Azure subscription][azure_sub].
@@ -29,7 +29,7 @@ For example, get the inference endpoint URL and credentials associated with your
   * The token must have the scope `https://management.azure.com/.default` or `https://ml.azure.com/.default`, depending on the set of client operations you will execute.
   * [Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) installed.
   * You are logged into your Azure account by running `az login`.
-  * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all you subscription and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.
+  * Note that if you have multiple Azure subscriptions, the subscription that contains your Azure AI Project resource must be your default subscription. Run `az account list --output table` to list all your subscriptions and see which one is the default. Run `az account set --subscription "Your Subscription ID or Name"` to change your default subscription.

 ### Install the package

@@ -119,7 +119,7 @@ print(connection)
 If the call was made with `include_credentials=True`, depending on the value of `connection.authentication_type`, either `connection.key` or `connection.token_credential`
 will be populated. Otherwise both will be `None`.

-#### Get proprties of a connection by its connection name
+#### Get properties of a connection by its connection name

 To get the connection properties of a connection with name `connection_name`:

@@ -132,10 +132,10 @@ print(connection)

 ### Get an authenticated ChatCompletionsClient

-Your Azure AI Studio project may have one or more AI models deployed that support chat completions. These could be OpenAI models, Microsoft models, or models from other providers. Use the code below to get an already authenticated [ChatCompletionsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.chatcompletionsclient?view=azure-python-preview) from the [azure-ai-inference](https://pypi.org/project/azure-ai-inference/) package, and excute a chat completions call.
First, install the package: ```bash -pip insall azure-ai-inference +pip install azure-ai-inference ``` Then run this code: @@ -153,10 +153,10 @@ print(response.choices[0].message.content) ### Get an authenticated AzureOpenAI client -Your Azure AI Studio project may have one or more OpenAI models deployed that support chat completions. Use the code below to get an already authenticated [AzureOpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai) from the [openai](https://pypi.org/project/openai/) package, and excute a chat completions call. First, install the package: +Your Azure AI Studio project may have one or more OpenAI models deployed that support chat completions. Use the code below to get an already authenticated [AzureOpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai) from the [openai](https://pypi.org/project/openai/) package, and execute a chat completions call. First, install the package: ```bash -pip insall openai +pip install openai ``` Then run this code: @@ -179,11 +179,11 @@ print(response.choices[0].message.content) ### Agents (Private Preview) -Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with agents: +Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with Agents: Agents are actively being developed. A sign-up form for private preview is coming soon. - - Create agent with: + - Create an Agent with: - File Search - Code interpreter - Bing grounding @@ -202,7 +202,7 @@ Agents are actively being developed. A sign-up form for private preview is comin #### Create Agent -Here is an example of how to create an agent: +Here is an example of how to create an Agent: ```python @@ -215,7 +215,7 @@ agent = project_client.agents.create_agent( -To allow agents to access your resources or custom functions, you need tools. You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`. +To allow Agents to access your resources or custom functions, you need tools. You can pass tools to `create_agent` by either `toolset` or combination of `tools` and `tool_resources`. Here is an example of `toolset`: @@ -255,11 +255,11 @@ agent = project_client.agents.create_agent( -In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. But you are welcome to use another approach. +In the following sections, we show you sample code in either `toolset` or combination of `tools` and `tool_resources`. #### Create Agent with File Search -To perform file search by an agent, we first need to upload a file, create a vector store, and associate the file to the vector store. Here is an example: +To perform file search by an Agent, we first need to upload a file, create a vector store, and associate the file to the vector store. 
Here is an example: @@ -286,7 +286,7 @@ agent = project_client.agents.create_agent( #### Create Agent with Code Interpreter -Here is an example to upload a file and use it for code interpreter by an agent: +Here is an example to upload a file and use it for code interpreter by an Agent: @@ -312,7 +312,7 @@ agent = project_client.agents.create_agent( #### Create Agent with Bing Grounding -To enable your agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. +To enable your Agent to perform search through Bing search API, you use `BingGroundingTool` along with a connection. Here is an example: @@ -377,7 +377,7 @@ with project_client: #### Create Agent with Function Call -You can enhance your agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: +You can enhance your Agents by defining callback functions as function tools. These can be provided to `create_agent` via either the `toolset` parameter or the combination of `tools` and `tool_resources`. Here are the distinctions: - `toolset`: When using the `toolset` parameter, you provide not only the function definitions and descriptions but also their implementations. The SDK will execute these functions within `create_and_run_process` or `streaming` . These functions will be invoked based on their definitions. - `tools` and `tool_resources`: When using the `tools` and `tool_resources` parameters, only the function definitions and descriptions are provided to `create_agent`, without the implementations. The `Run` or `event handler of stream` will raise a `requires_action` status based on the function definitions. Your code must handle this status and call the appropriate functions. @@ -434,7 +434,7 @@ thread = project_client.agents.create_thread() #### Create Thread with Tool Resource -In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. +In some scenarios, you might need to assign specific resources to individual threads. To achieve this, you provide the `tool_resources` argument to `create_thread`. In the following example, you create a vector store and upload a file, enable an Agent for file search using the `tools` argument, and then associate the file with the thread using the `tool_resources` argument. @@ -530,7 +530,7 @@ message = project_client.agents.create_message( To process your message, you can use `create_run`, `create_and_process_run`, or `create_stream`. -`create_run` requests the agent to process the message without polling for the result. If you are using `function tools` regardless as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py). +`create_run` requests the Agent to process the message without polling for the result. 
If you are using `function tools`, regardless of whether they are passed as `toolset` or not, your code is responsible for polling for the result and acknowledging the status of `Run`. When the status is `requires_action`, your code is responsible for calling the function tools. For a code sample, visit [`sample_agents_functions.py`](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-projects/samples/agents/sample_agents_functions.py).

 Here is an example of `create_run` and poll until the run is completed:

@@ -627,12 +627,12 @@ for data_point in reversed(messages.data):

-Depending on the use case, if you expect the agents to return only text messages, `list_messages` should be sufficient.
+Depending on the use case, if you expect the Agents to return only text messages, `list_messages` should be sufficient.

 If you are using tools, consider using the `get_messages` function instead. This function classifies the message content and returns properties such as `text_messages`, `image_contents`, `file_citation_annotations`, and `file_path_annotations`.

 ### Retrieve File

-Files uploaded by agents cannot be retrieved back. If your use case need to access the file content uploaded by the agents, you are adviced to keep an additional copy accessible by your application. However, files generated by agents are retrievable by `save_file` or `get_file_content`.
+Files uploaded by Agents cannot be retrieved back. If your use case needs to access the file content uploaded by the Agents, you are advised to keep an additional copy accessible by your application. However, files generated by Agents are retrievable by `save_file` or `get_file_content`.

 Here is an example retrieving file ids from messages and save to the local drive:

@@ -714,7 +714,7 @@ print("Deleted agent")

 #### Tracing

-As part of Azure AI project, you can use the its connection string and observe the full execution path through Azure Monitor. Typically you might want to start tracing before you create an agent.
+You can add an Application Insights Azure resource to your Azure AI Studio project. See the Tracing tab in your studio. If one was enabled, you can get the Application Insights connection string, configure your Agents, and observe the full execution path through Azure Monitor. Typically, you might want to start tracing before you create an Agent.

 ##### Installation

From 65f29799f9a490ccc7e072fd7ceec2134399522b Mon Sep 17 00:00:00 2001
From: Darren Cohen <39422044+dargilco@users.noreply.github.com>
Date: Thu, 14 Nov 2024 00:04:41 -0800
Subject: [PATCH 134/138] More

---
 sdk/ai/azure-ai-projects/README.md | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md
index 7ffee4b12bc2..2f54f6514abd 100644
--- a/sdk/ai/azure-ai-projects/README.md
+++ b/sdk/ai/azure-ai-projects/README.md
@@ -11,7 +11,7 @@ For example, get the inference endpoint URL and credentials associated with your
 * **Enable OpenTelemetry tracing**.

 [Product documentation](https://aka.ms/azsdk/azure-ai-projects/product-doc)
-| [Samples](https://aka.ms/azsdk/azure-ai-projects/python/samples)
+| [Samples][samples]
When you enter it, you will see a tab named "Connected resources" under your project. The `.connections` operations on the client allow you to enumerate the connections and get connection properties. Connection properties include the resource URL and authentication credentials, among other things. +Below are code examples of some simple connection operations. Additional samples can be found under the "connetions" folder in the [package samples][samples]. + #### Get properties of all connections To list the properties of all the connections in the Azure AI Studio project: @@ -138,19 +140,21 @@ Your Azure AI Studio project may have one or more AI models deployed that suppor pip install azure-ai-inference ``` -Then run this code: +Then run this code (replace "gpt-4o" with your model deployment name): ```python inference_client = project_client.inference.get_chat_completions_client() response = inference_client.complete( - model="gpt-4o", + model="gpt-4o", # Model deployment name messages=[UserMessage(content="How many feet are in a mile?")] ) print(response.choices[0].message.content) ``` +See the "inference" folder in the [package samples][samples] for additional samples, including getting an authenticated [EmbeddingsClient](https://learn.microsoft.com/python/api/azure-ai-inference/azure.ai.inference.embeddingsclient?view=azure-python-preview). + ### Get an authenticated AzureOpenAI client Your Azure AI Studio project may have one or more OpenAI models deployed that support chat completions. Use the code below to get an already authenticated [AzureOpenAI](https://github.com/openai/openai-python?tab=readme-ov-file#microsoft-azure-openai) from the [openai](https://pypi.org/project/openai/) package, and execute a chat completions call. First, install the package: @@ -159,13 +163,13 @@ Your Azure AI Studio project may have one or more OpenAI models deployed that su pip install openai ``` -Then run this code: +Then run this code (replace "gpt-4o" with your model deployment name): ```python aoai_client = project_client.inference.get_azure_openai_client(api_version="2024-06-01") response = aoai_client.chat.completions.create( - model="gpt-4o", + model="gpt-4o", # Model deployment name messages=[ { "role": "user", @@ -177,9 +181,11 @@ response = aoai_client.chat.completions.create( print(response.choices[0].message.content) ``` +See the "inference" folder in the [package samples][samples] for additional samples. + ### Agents (Private Preview) -Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with Agents: +Agents in the Azure AI Projects client library are designed to facilitate various interactions and operations within your AI projects. They serve as the core components that manage and execute tasks, leveraging different tools and resources to achieve specific goals. The following steps outline the typical sequence for interacting with Agents. See the "agents" folder in the [package samples][samples] for additional Agent samples. Agents are actively being developed. A sign-up form for private preview is coming soon. @@ -859,6 +865,7 @@ see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments. 
+[samples]: https://aka.ms/azsdk/azure-ai-projects/python/samples/ [code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ [entra_id]: https://learn.microsoft.com/azure/ai-services/authentication?tabs=powershell#authenticate-with-microsoft-entra-id [azure_identity_credentials]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity#credentials From 9e459d91e14345f4e1ec81e91dbad25c1874bbed Mon Sep 17 00:00:00 2001 From: Darren Cohen <39422044+dargilco@users.noreply.github.com> Date: Thu, 14 Nov 2024 00:33:22 -0800 Subject: [PATCH 135/138] Update tests README.md --- sdk/ai/azure-ai-projects/README.md | 2 +- sdk/ai/azure-ai-projects/tests/README.md | 43 +++++++++--------------- 2 files changed, 17 insertions(+), 28 deletions(-) diff --git a/sdk/ai/azure-ai-projects/README.md b/sdk/ai/azure-ai-projects/README.md index 2f54f6514abd..4b3982c55354 100644 --- a/sdk/ai/azure-ai-projects/README.md +++ b/sdk/ai/azure-ai-projects/README.md @@ -22,7 +22,7 @@ For example, get the inference endpoint URL and credentials associated with your - Python 3.8 or later. - An [Azure subscription][azure_sub]. -- A [project in Azure AI Studio](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-projects?tabs=ai-studio). +- A [project in Azure AI Studio](https://learn.microsoft.com/azure/ai-studio/how-to/create-projects?tabs=ai-studio). - The project connection string. It can be found in your Azure AI Studio project overview page, under "Project details". Below we will assume the environment variable `PROJECT_CONNECTION_STRING` was defined to hold this value. - Entra ID is needed to authenticate the client. Your application needs an object that implements the [TokenCredential](https://learn.microsoft.com/python/api/azure-core/azure.core.credentials.tokencredential) interface. Code samples here use [DefaultAzureCredential](https://learn.microsoft.com/python/api/azure-identity/azure.identity.defaultazurecredential). To get that working, you will need: * The role `Azure AI Developer` assigned to you. Role assigned can be done via the "Access Control (IAM)" tab of your Azure AI Project resource in the Azure portal. diff --git a/sdk/ai/azure-ai-projects/tests/README.md b/sdk/ai/azure-ai-projects/tests/README.md index acda18e41581..b2ceca2de228 100644 --- a/sdk/ai/azure-ai-projects/tests/README.md +++ b/sdk/ai/azure-ai-projects/tests/README.md @@ -1,26 +1,24 @@ # Azure AI Project client library tests for Python -The instructions below are for running tests locally, on a Windows machine, against the live service. +The instructions below are for running tests locally, on a Windows machine, against the live service using a local build of the client library. ## Build and install the client library - Clone or download this sample repository. 
- Open a command prompt window in the folder `sdk\ai\azure-ai-projects`
-- If you want to run tests against the latest published client library, install it by running:
-    ```bash
-    pip install azure-ai-projects
-    ```
-- If you want to run tests against a locally built client library:
-    - First build the wheel:
-      ```bash
-      pip install wheel
-      pip install -r dev_requirements.txt
-      python setup.py bdist_wheel
-      ```
-    - Then install the resulting local wheel (update version `1.0.0b1` to the current one):
-      ```bash
-      pip install dist\azure_ai_projects-1.0.0b1-py3-none-any.whl --user --force-reinstall
-      ```
+- Install development dependencies:
+  ```bash
+  pip install -r dev_requirements.txt
+  ```
+- Build the package:
+  ```bash
+  pip install wheel
+  python setup.py bdist_wheel
+  ```
+- Install the resulting wheel (update version `1.0.0b1` to the current one):
+  ```bash
+  pip install dist\azure_ai_projects-1.0.0b1-py3-none-any.whl --user --force-reinstall
+  ```

 ## Log in to Azure

 ```
 az login
 ```

-## Setup for running tests in the `agents` folder
-
-**Note:** The environment variables required by the test are defined in `agentClientPreparer`. **It is important project name to be the part of environment variable!** For example, the project is `azure_ai_projects` and the variable may be called `azure_ai_projects_connection_string`. The variables without `azure_ai_projects` substrings will be ignored according to logic of `EnvironmentVariableLoader`. The values of these variables will be supplied to kwargs of the unit tests, decorated by `EnvironmentVariableLoader` function.
-
-```bash
-set AZURE_AI_PROJECTS_CONNECTION_STRING=
-set AZURE_AI_PROJECTS_DATA_PATH=
-```
-
-## Setup for running tests in the `evaluations` folder
-
-## Setup for running tests in the `connections` and `inference` folders
+## Set up environment variables
+
+Edit the file `azure_ai_projects_tests.env` located in the folder above. Follow the instructions there on how to set up Azure AI Studio projects to be used for testing, and enter appropriate values for the environment variables used for the tests you want to run.

 ## Configure test proxy
From 4a69706ecf15e208e4ffd75eda557d47c08c5fd1 Mon Sep 17 00:00:00 2001
From: glenn
Date: Thu, 14 Nov 2024 19:06:12 -0500
Subject: [PATCH 136/138] add sharepoint sample, update Azure AI search constructor

---
 .../azure/ai/projects/models/_patch.py        | 23 ++----
 .../agents/sample_agents_azure_ai_search.py   | 20 ++---
 .../agents/sample_agents_sharepoint.py        | 78 +++++++++++++++++++
 3 files changed, 91 insertions(+), 30 deletions(-)
 create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py

diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
index 994c80ba60b3..6686aacdd06c 100644
--- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
+++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -510,25 +510,15 @@ class AzureAISearchTool(Tool):
     A tool that searches for information using Azure AI Search.
     """

-    def __init__(self):
-        self.index_list = []
-
-    def add_index(self, index: str, name: str):
-        """
-        Add an index ID to the list of indices used to search.
-
-        :param str index: The index connection id.
-        :param str name: The index name.
- """ - # TODO - self.index_list.append(IndexResource(index_connection_id=index, index_name=name)) + def __init__(self, index_connection_id: str, index_name: str): + self.index_list = [IndexResource(index_connection_id=index_connection_id, index_name=index_name)] @property def definitions(self) -> List[ToolDefinition]: """ Get the Azure AI search tool definitions. - :rtype: List[ToolDefinition] + :return: A list of tool definitions. """ return [AzureAISearchToolDefinition()] @@ -537,13 +527,16 @@ def resources(self) -> ToolResources: """ Get the Azure AI search resources. - :rtype: ToolResources + :return: ToolResources populated with azure_ai_search associated resources. """ return ToolResources(azure_ai_search=AzureAISearchResource(index_list=self.index_list)) def execute(self, tool_call: Any) -> Any: - pass + """ + AI Search tool does not execute client-side. + """ + pass class ConnectionTool(Tool): """ diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py index a85f7432c846..7591ad58d895 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_azure_ai_search.py @@ -9,6 +9,8 @@ DESCRIPTION: This sample demonstrates how to use agent operations with the Azure AI Search tool from the Azure Agents service using a synchronous client. + To learn how to set up an Azure AI Search resource, + visit https://learn.microsoft.com/azure/search/search-get-started-portal USAGE: python sample_agents_azure_ai_search.py @@ -36,30 +38,18 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -# [START create_agent_with_azure_ai_search_tool] -conn_list = project_client.connections.list() -conn_id = "" -for conn in conn_list: - if conn.connection_type == "CognitiveSearch": - conn_id = conn.id - break - -print(conn_id) - # Initialize agent AI search tool and add the search index connection id -ai_search = AzureAISearchTool() -ai_search.add_index(conn_id, "sample_index") +ai_search = AzureAISearchTool(index_connection_id="myconnectionid", index_name="myindexname") # Create agent with AI search tool and process assistant run with project_client: agent = project_client.agents.create_agent( - model="gpt-4-1106-preview", + model="gpt-4o", name="my-assistant", instructions="You are a helpful assistant", tools=ai_search.definitions, headers={"x-ms-enable-preview": "true"}, ) - # [END create_agent_with_azure_ai_search_tool] print(f"Created agent, ID: {agent.id}") # Create thread for communication @@ -70,7 +60,7 @@ message = project_client.agents.create_message( thread_id=thread.id, role="user", - content="Hello, send an email with the datetime and weather information in New York?", + content="What inventory is available currently?", ) print(f"Created message, ID: {message.id}") diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py new file mode 100644 index 000000000000..1c9f21f0b650 --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py @@ -0,0 +1,78 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +FILE: sample_agents_sharepoint.py + +DESCRIPTION: + This sample demonstrates how to use agent operations with the + Sharepoint tool from the Azure Agents service using a synchronous client. + +USAGE: + python sample_agents_sharepoint.py + + Before running the sample: + + pip install azure-ai-projects azure-identity + + Set this environment variables with your own values: + PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project. +""" + +import os +from azure.ai.projects import AIProjectClient +from azure.identity import DefaultAzureCredential +from azure.ai.projects.models import SharepointTool + + +# Create an Azure AI Client from a connection string, copied from your AI Studio project. +# At the moment, it should be in the format ";;;" +# Customer needs to login to Azure subscription via Azure CLI and set the environment variables + +project_client = AIProjectClient.from_connection_string( + credential=DefaultAzureCredential(), + conn_str=os.environ["PROJECT_CONNECTION_STRING"], +) + +# Initialize Sharepoint tool with connection id +sharepoint = SharepointTool(connection_id="my_connection_id") + +# Create agent with Sharepoint tool and process assistant run +with project_client: + agent = project_client.agents.create_agent( + model="gpt-4o", + name="my-assistant", + instructions="You are a helpful assistant", + tools=sharepoint.definitions, + headers={"x-ms-enable-preview": "true"}, + ) + print(f"Created agent, ID: {agent.id}") + + # Create thread for communication + thread = project_client.agents.create_thread() + print(f"Created thread, ID: {thread.id}") + + # Create message to thread + message = project_client.agents.create_message( + thread_id=thread.id, + role="user", + content="Hello, tell me about my health insurance options", + ) + print(f"Created message, ID: {message.id}") + + # Create and process agent run in thread with tools + run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id) + print(f"Run finished with status: {run.status}") + + if run.status == "failed": + print(f"Run failed: {run.last_error}") + + # Delete the assistant when done + project_client.agents.delete_agent(agent.id) + print("Deleted agent") + + # Fetch and log all messages + messages = project_client.agents.list_messages(thread_id=thread.id) + print(f"Messages: {messages}") From 1092c17a9de52c93ac9010ae8104ce06f95d8eef Mon Sep 17 00:00:00 2001 From: glenn Date: Thu, 14 Nov 2024 19:22:58 -0500 Subject: [PATCH 137/138] update fabric skill with new TypeSpec --- .../azure/ai/projects/models/_enums.py | 4 +- .../azure/ai/projects/models/_models.py | 46 +++++++++---------- .../agents/sample_agents_sharepoint.py | 2 +- sdk/ai/azure-ai-projects/tsp-location.yaml | 2 +- 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py index efde56d8624d..014cab2f596b 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_enums.py @@ -39,8 +39,8 @@ class AgentsNamedToolChoiceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tool type ``file_search``""" BING_GROUNDING = "bing_grounding" """Tool type ``bing_grounding``""" - MICROSOFT_FABRIC = "microsoft_fabric" - """Tool type ``microsoft_fabric``""" + MICROSOFT_FABRIC = "fabric_aiskill" + """Tool type ``fabric_aiskill``""" SHAREPOINT = 
"sharepoint_grounding" """Tool type ``sharepoint_grounding``""" AZURE_AI_SEARCH = "azure_ai_search" diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py index 5e76e0658f77..8ac0d0d7ad23 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_models.py @@ -223,7 +223,7 @@ class AgentsNamedToolChoice(_model_base.Model): :ivar type: the type of tool. If type is ``function``\\ , the function name must be set. Required. Known values are: "function", "code_interpreter", "file_search", "bing_grounding", - "microsoft_fabric", "sharepoint_grounding", and "azure_ai_search". + "fabric_aiskill", "sharepoint_grounding", and "azure_ai_search". :vartype type: str or ~azure.ai.projects.models.AgentsNamedToolChoiceType :ivar function: The name of the function to call. :vartype function: ~azure.ai.projects.models.FunctionName @@ -232,7 +232,7 @@ class AgentsNamedToolChoice(_model_base.Model): type: Union[str, "_models.AgentsNamedToolChoiceType"] = rest_field() """the type of tool. If type is ``function``, the function name must be set. Required. Known values are: \"function\", \"code_interpreter\", \"file_search\", \"bing_grounding\", - \"microsoft_fabric\", \"sharepoint_grounding\", and \"azure_ai_search\".""" + \"fabric_aiskill\", \"sharepoint_grounding\", and \"azure_ai_search\".""" function: Optional["_models.FunctionName"] = rest_field() """The name of the function to call.""" @@ -2410,28 +2410,28 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="microsoft_fabric"): +class MicrosoftFabricToolDefinition(ToolDefinition, discriminator="fabric_aiskill"): """The input definition information for a Microsoft Fabric tool as used to configure an agent. - :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is - "microsoft_fabric". + :ivar type: The object type, which is always 'fabric_aiskill'. Required. Default value is + "fabric_aiskill". :vartype type: str - :ivar microsoft_fabric: The list of connections used by the Microsoft Fabric tool. Required. - :vartype microsoft_fabric: ~azure.ai.projects.models.ToolConnectionList + :ivar fabric_aiskill: The list of connections used by the Microsoft Fabric tool. Required. + :vartype fabric_aiskill: ~azure.ai.projects.models.ToolConnectionList """ - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - microsoft_fabric: "_models.ToolConnectionList" = rest_field() + type: Literal["fabric_aiskill"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'fabric_aiskill'. Required. Default value is + \"fabric_aiskill\".""" + fabric_aiskill: "_models.ToolConnectionList" = rest_field() """The list of connections used by the Microsoft Fabric tool. Required.""" @overload def __init__( self, *, - microsoft_fabric: "_models.ToolConnectionList", + fabric_aiskill: "_models.ToolConnectionList", ) -> None: ... 
@overload @@ -2442,7 +2442,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="microsoft_fabric", **kwargs) + super().__init__(*args, type="fabric_aiskill", **kwargs) class OpenAIFile(_model_base.Model): @@ -2963,7 +2963,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class RequiredToolCall(_model_base.Model): - """An abstract representation a a tool invocation needed by the model to continue a run. + """An abstract representation of a tool invocation needed by the model to continue a run. You probably want to use the sub-classes and not this class directly. Known sub-classes are: RequiredFunctionToolCall @@ -4498,7 +4498,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_fabric"): +class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="fabric_aiskill"): """A record of a call to a Microsoft Fabric tool, issued by the model in evaluation of a defined tool, that represents executed Microsoft Fabric operations. @@ -4507,17 +4507,17 @@ class RunStepMicrosoftFabricToolCall(RunStepToolCall, discriminator="microsoft_f :ivar id: The ID of the tool call. This ID must be referenced when you submit tool outputs. Required. :vartype id: str - :ivar type: The object type, which is always 'microsoft_fabric'. Required. Default value is - "microsoft_fabric". + :ivar type: The object type, which is always 'fabric_aiskill'. Required. Default value is + "fabric_aiskill". :vartype type: str :ivar microsoft_fabric: Reserved for future use. Required. :vartype microsoft_fabric: dict[str, str] """ - type: Literal["microsoft_fabric"] = rest_discriminator(name="type") # type: ignore - """The object type, which is always 'microsoft_fabric'. Required. Default value is - \"microsoft_fabric\".""" - microsoft_fabric: Dict[str, str] = rest_field() + type: Literal["fabric_aiskill"] = rest_discriminator(name="type") # type: ignore + """The object type, which is always 'fabric_aiskill'. Required. Default value is + \"fabric_aiskill\".""" + microsoft_fabric: Dict[str, str] = rest_field(name="fabric_aiskill") """Reserved for future use. Required.""" @overload @@ -4536,7 +4536,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, type="microsoft_fabric", **kwargs) + super().__init__(*args, type="fabric_aiskill", **kwargs) class RunStepSharepointToolCall(RunStepToolCall, discriminator="sharepoint_grounding"): @@ -5198,7 +5198,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ToolConnectionList(_model_base.Model): """A set of connection resources currently used by either the ``bing_grounding``\\ , - ``microsoft_fabric``\\ , or ``sharepoint_grounding`` tools. + ``fabric_aiskill``\\ , or ``sharepoint_grounding`` tools. :ivar connection_list: The connections attached to this tool. 
There can be a maximum of 1 connection diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py index 1c9f21f0b650..d2981d07630b 100644 --- a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_sharepoint.py @@ -36,7 +36,7 @@ conn_str=os.environ["PROJECT_CONNECTION_STRING"], ) -# Initialize Sharepoint tool with connection id +# Initialize Sharepoint tool with connection id sharepoint = SharepointTool(connection_id="my_connection_id") # Create agent with Sharepoint tool and process assistant run diff --git a/sdk/ai/azure-ai-projects/tsp-location.yaml b/sdk/ai/azure-ai-projects/tsp-location.yaml index b32318114a16..a430559f00bc 100644 --- a/sdk/ai/azure-ai-projects/tsp-location.yaml +++ b/sdk/ai/azure-ai-projects/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/ai/Azure.AI.Projects -commit: 17ab26e678f06460cd154e2c0cb187aae158f4bf +commit: 6d7c38caedfcd20f03e77b8628cde9f3bf6f7321 repo: Azure/azure-rest-api-specs additionalDirectories: From 5d1e38c054c652b58c4e1767d07f273acde462ca Mon Sep 17 00:00:00 2001 From: Glenn Harper Date: Fri, 15 Nov 2024 10:18:58 -0800 Subject: [PATCH 138/138] add fabric tool def and sample --- .../azure/ai/projects/models/_patch.py | 16 ++++ .../samples/agents/sample_agents_fabric.py | 80 +++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 sdk/ai/azure-ai-projects/samples/agents/sample_agents_fabric.py diff --git a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py index 6686aacdd06c..f8097ece719a 100644 --- a/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py +++ b/sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py @@ -55,6 +55,7 @@ MessageTextContent, MessageTextFileCitationAnnotation, MessageTextFilePathAnnotation, + MicrosoftFabricToolDefinition, OpenAIPageableListOfThreadMessage, RequiredFunctionToolCall, RunStep, @@ -580,6 +581,20 @@ def definitions(self) -> List[ToolDefinition]: return [BingGroundingToolDefinition(bing_grounding=ToolConnectionList(connection_list=self.connection_ids))] +class FabricTool(ConnectionTool): + """ + A tool that searches for information using Microsoft Fabric. + """ + + @property + def definitions(self) -> List[ToolDefinition]: + """ + Get the Microsoft Fabric tool definitions. + + :rtype: List[ToolDefinition] + """ + return [MicrosoftFabricToolDefinition(fabric_aiskill=ToolConnectionList(connection_list=self.connection_ids))] + class SharepointTool(ConnectionTool): """ A tool that searches for information using Sharepoint. @@ -1425,6 +1440,7 @@ def get_last_text_message_by_sender(self, sender: str) -> Optional[MessageTextCo "FunctionTool", "BingGroundingTool", "SharepointTool", + "FabricTool", "AzureAISearchTool", "SASTokenCredential", "Tool", diff --git a/sdk/ai/azure-ai-projects/samples/agents/sample_agents_fabric.py b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_fabric.py new file mode 100644 index 000000000000..1dd98a17ac5a --- /dev/null +++ b/sdk/ai/azure-ai-projects/samples/agents/sample_agents_fabric.py @@ -0,0 +1,80 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------
+
+"""
+FILE: sample_agents_fabric.py
+
+DESCRIPTION:
+    This sample demonstrates how to use agent operations with the Microsoft Fabric grounding tool from
+    the Azure Agents service using a synchronous client.
+
+USAGE:
+    python sample_agents_fabric.py
+
+    Before running the sample:
+
+    pip install azure-ai-projects azure-identity
+
+    Set this environment variable with your own value:
+    PROJECT_CONNECTION_STRING - the Azure AI Project connection string, as found in your AI Studio Project.
+"""
+
+import os
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+from azure.ai.projects.models import FabricTool
+
+
+# Create an Azure AI Client from a connection string, copied from your AI Studio project.
+# At the moment, it should be in the format "<HostName>;<AzureSubscriptionId>;<ResourceGroup>;<ProjectName>"
+# You need to log in to your Azure subscription via the Azure CLI and set the environment variables
+
+project_client = AIProjectClient.from_connection_string(
+    credential=DefaultAzureCredential(),
+    conn_str=os.environ["PROJECT_CONNECTION_STRING"],
+)
+
+conn_id = "your-connection-id"
+
+# Initialize agent fabric tool and add the connection id
+fabric = FabricTool(connection_id=conn_id)
+
+# Create agent with the fabric tool and process assistant run
+with project_client:
+    agent = project_client.agents.create_agent(
+        model="gpt-4o",
+        name="my-assistant",
+        instructions="You are a helpful assistant",
+        tools=fabric.definitions,
+        headers={"x-ms-enable-preview": "true"},
+    )
+    print(f"Created agent, ID: {agent.id}")
+
+    # Create thread for communication
+    thread = project_client.agents.create_thread()
+    print(f"Created thread, ID: {thread.id}")
+
+    # Create message to thread
+    message = project_client.agents.create_message(
+        thread_id=thread.id,
+        role="user",
+        content="How does Wikipedia explain Euler's Identity?",
+    )
+    print(f"Created message, ID: {message.id}")
+
+    # Create and process agent run in thread with tools
+    run = project_client.agents.create_and_process_run(thread_id=thread.id, assistant_id=agent.id)
+    print(f"Run finished with status: {run.status}")
+
+    if run.status == "failed":
+        print(f"Run failed: {run.last_error}")
+
+    # Delete the assistant when done
+    project_client.agents.delete_agent(agent.id)
+    print("Deleted agent")
+
+    # Fetch and log all messages
+    messages = project_client.agents.list_messages(thread_id=thread.id)
+    print(f"Messages: {messages}")